desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: '30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] 
flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: 
str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' 
name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix gid: 24118 name: e24-h19-740xd
        - gid: 34118
          name: e24-h21-740xd
      monmap:
        created: '2019-04-16 17:59:51.468270'
        epoch: 1
        features:
          optional: []
          persistent:
          - kraken
          - luminous
          - mimic
          - osdmap-prune
          - nautilus
        fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf
        min_mon_release: 14
        min_mon_release_name: nautilus
        modified: '2019-04-16 17:59:51.468270'
        mons:
        - addr: 10.1.24.17:6789/0
          name: e24-h17-740xd
          public_addr: 10.1.24.17:6789/0
          public_addrs:
            addrvec:
            - addr: 10.1.24.17:3300
              nonce: 0
              type: v2
            - addr: 10.1.24.17:6789
              nonce: 0
              type: v1
          rank: 0
        - addr: 10.1.24.19:6789/0
          name: e24-h19-740xd
          public_addr: 10.1.24.19:6789/0
          public_addrs:
            addrvec:
            - addr: 10.1.24.19:3300
              nonce: 0
              type: v2
            - addr: 10.1.24.19:6789
              nonce: 0
              type: v1
          rank: 1
        - addr: 10.1.24.21:6789/0
          name: e24-h21-740xd
          public_addr: 10.1.24.21:6789/0
          public_addrs:
            addrvec:
            - addr: 10.1.24.21:3300
              nonce: 0
              type: v2
            - addr: 10.1.24.21:6789
              nonce: 0
              type: v1
          rank: 2
      osdmap:
        osdmap:
          epoch: 17
          full: false
          nearfull: false
          num_in_osds: 0
          num_osds: 0
          num_remapped_pgs: 0
          num_up_osds: 0
      pgmap:
        bytes_avail: 0
        bytes_total: 0
        bytes_used: 0
        data_bytes: 0
        num_objects: 0
        num_pgs: 0
        num_pools: 0
        pgs_by_state: []
      progress_events: {}
      quorum:
      - 0
      - 1
      - 2
      quorum_age: 39
      quorum_names:
      - e24-h17-740xd
      - e24-h19-740xd
      - e24-h21-740xd
      servicemap:
        epoch: 1
        modified: '0.000000'
        services: {}
ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  ansible_facts:
    ceph_current_status:
      election_epoch: 14
      fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf
      fsmap:
        by_rank: []
        epoch: 1
        up:standby: 0
      health:
        checks: {}
        status: HEALTH_OK
      mgrmap:
        active_addr: 10.1.24.17:6801/64181
        active_addrs:
          addrvec:
          - addr: 10.1.24.17:6800
            nonce: 64181
            type: v2
          - addr: 10.1.24.17:6801
            nonce: 64181
            type: v1
        active_change: '2019-04-16 19:52:49.041520'
        active_gid: 14115
        active_name: e24-h17-740xd
        always_on_modules:
          nautilus:
          - balancer
          - crash
          - devicehealth
          - orchestrator_cli
          - progress
          - status
          - volumes
        available: true
        epoch: 5
        modules:
        - iostat
        - restful
        services: {}
        standbys:
        - available_modules: - can_run: true error_string: '' module_options: password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str server_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_url see_also: [] tags: [] type: str username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_server: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_server see_also: [] tags: [] type: str name: ansible - can_run: true error_string: '' module_options: active: default_value: 'False' desc: automatically balance PGs across cluster enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: active see_also: [] tags: [] type: bool begin_time: default_value: '0000' desc: beginning time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: begin_time see_also: [] tags: [] type: str begin_weekday: default_value: '0' desc: Restrict automatic balancing to this day of the week or later enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: begin_weekday see_also: [] tags: [] type: uint crush_compat_max_iterations: default_value: '25' desc: maximum number of iterations to attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '250' min: '1' name: crush_compat_max_iterations see_also: [] tags: [] type: uint crush_compat_metrics: default_value: pgs,objects,bytes desc: metrics with which to calculate OSD utilization enum_allowed: [] flags: 1 level: advanced long_desc: Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization. max: '' min: '' name: crush_compat_metrics see_also: [] tags: [] type: str crush_compat_step: default_value: '0.5' desc: aggressiveness of optimization enum_allowed: [] flags: 1 level: advanced long_desc: .99 is very aggressive, .01 is less aggressive max: '0.999' min: '0.001' name: crush_compat_step see_also: [] tags: [] type: float end_time: default_value: '2400' desc: ending time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: end_time see_also: [] tags: [] type: str end_weekday: default_value: '7' desc: Restrict automatic balancing to days of the week earlier than this enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc.
max: '7' min: '0' name: end_weekday see_also: [] tags: [] type: uint min_score: default_value: '0' desc: minimum score, below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_score see_also: [] tags: [] type: float mode: default_value: none desc: Balancer mode enum_allowed: - crush-compat - none - upmap flags: 1 level: advanced long_desc: '' max: '' min: '' name: mode see_also: [] tags: [] type: str pool_ids: default_value: '' desc: pools which the automatic balancing will be limited to enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_ids see_also: [] tags: [] type: str sleep_interval: default_value: '60' desc: how frequently to wake up and attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs upmap_max_deviation: default_value: '0.01' desc: deviation below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: If the ratio between the fullest and least-full OSD is below this value then we stop trying to optimize placement. max: '1' min: '0' name: upmap_max_deviation see_also: [] tags: [] type: float upmap_max_iterations: default_value: '10' desc: maximum upmap optimization iterations enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: upmap_max_iterations see_also: [] tags: [] type: uint name: balancer - can_run: true error_string: '' module_options: {} name: crash - can_run: true error_string: '' module_options: salt_api_eauth: default_value: sharedsecret desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_eauth see_also: [] tags: [] type: str salt_api_password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_password see_also: [] tags: [] type: str salt_api_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail 
enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: '30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced 
long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' 
min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int 
zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix gid: 24118 name: e24-h19-740xd - available_modules: - can_run: true error_string: '' module_options: password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str server_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_url see_also: [] tags: [] type: str username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_server: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_server see_also: [] tags: [] type: str name: ansible - can_run: true error_string: '' module_options: active: default_value: 'False' desc: automatically balance PGs across cluster enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: active see_also: [] tags: [] type: bool begin_time: default_value: '0000' desc: beginning time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: begin_time see_also: [] tags: [] type: str begin_weekday: default_value: '0' desc: Restrict automatic balancing to this day of the week or later enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: begin_weekday see_also: [] tags: [] type: uint crush_compat_max_iterations: default_value: '25' desc: maximum number of iterations to attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '250' min: '1' name: crush_compat_max_iterations see_also: [] tags: [] type: uint crush_compat_metrics: default_value: pgs,objects,bytes desc: metrics with which to calculate OSD utilization enum_allowed: [] flags: 1 level: advanced long_desc: Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization. max: '' min: '' name: crush_compat_metrics see_also: [] tags: [] type: str crush_compat_step: default_value: '0.5' desc: aggressiveness of optimization enum_allowed: [] flags: 1 level: advanced long_desc: .99 is very aggressive, .01 is less aggressive max: '0.999' min: '0.001' name: crush_compat_step see_also: [] tags: [] type: float end_time: default_value: '2400' desc: ending time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: end_time see_also: [] tags: [] type: str end_weekday: default_value: '7' desc: Restrict automatic balancing to days of the week earlier than this enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. 
max: '7' min: '0' name: end_weekday see_also: [] tags: [] type: uint min_score: default_value: '0' desc: minimum score, below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_score see_also: [] tags: [] type: float mode: default_value: none desc: Balancer mode enum_allowed: - crush-compat - none - upmap flags: 1 level: advanced long_desc: '' max: '' min: '' name: mode see_also: [] tags: [] type: str pool_ids: default_value: '' desc: pools which the automatic balancing will be limited to enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_ids see_also: [] tags: [] type: str sleep_interval: default_value: '60' desc: how frequently to wake up and attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs upmap_max_deviation: default_value: '0.01' desc: deviation below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: If the ratio between the fullest and least-full OSD is below this value then we stop trying to optimize placement. max: '1' min: '0' name: upmap_max_deviation see_also: [] tags: [] type: float upmap_max_iterations: default_value: '10' desc: maximum upmap optimization iterations enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: upmap_max_iterations see_also: [] tags: [] type: uint name: balancer - can_run: true error_string: '' module_options: {} name: crash - can_run: true error_string: '' module_options: salt_api_eauth: default_value: sharedsecret desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_eauth see_also: [] tags: [] type: str salt_api_password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_password see_also: [] tags: [] type: str salt_api_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail 
enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: '30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced 
long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' 
min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int 
zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix gid: 34118 name: e24-h21-740xd monmap: created: '2019-04-16 17:59:51.468270' epoch: 1 features: optional: [] persistent: - kraken - luminous - mimic - osdmap-prune - nautilus fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf min_mon_release: 14 min_mon_release_name: nautilus modified: '2019-04-16 17:59:51.468270' mons: - addr: 10.1.24.17:6789/0 name: e24-h17-740xd public_addr: 10.1.24.17:6789/0 public_addrs: addrvec: - addr: 10.1.24.17:3300 nonce: 0 type: v2 - addr: 10.1.24.17:6789 nonce: 0 type: v1 rank: 0 - addr: 10.1.24.19:6789/0 name: e24-h19-740xd public_addr: 10.1.24.19:6789/0 public_addrs: addrvec: - addr: 10.1.24.19:3300 nonce: 0 type: v2 - addr: 10.1.24.19:6789 nonce: 0 type: v1 rank: 1 - addr: 10.1.24.21:6789/0 name: e24-h21-740xd public_addr: 10.1.24.21:6789/0 public_addrs: addrvec: - addr: 10.1.24.21:3300 nonce: 0 type: v2 - addr: 10.1.24.21:6789 nonce: 0 type: v1 rank: 2 osdmap: osdmap: epoch: 17 full: false nearfull: false num_in_osds: 0 num_osds: 0 num_remapped_pgs: 0 num_up_osds: 0 pgmap: bytes_avail: 0 bytes_total: 0 bytes_used: 0 data_bytes: 0 num_objects: 0 num_pgs: 0 num_pools: 0 pgs_by_state: [] progress_events: {} quorum: - 0 - 1 - 2 quorum_age: 39 quorum_names: - e24-h17-740xd - e24-h19-740xd - e24-h21-740xd servicemap: epoch: 1 modified: '0.000000' services: {} ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_current_status: election_epoch: 14 fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf fsmap: by_rank: [] epoch: 1 up:standby: 0 health: checks: {} status: HEALTH_OK mgrmap: active_addr: 10.1.24.17:6801/64181 active_addrs: addrvec: - addr: 10.1.24.17:6800 nonce: 64181 type: v2 - addr: 10.1.24.17:6801 nonce: 64181 type: v1 active_change: '2019-04-16 19:52:49.041520' active_gid: 14115 active_name: e24-h17-740xd always_on_modules: nautilus: - balancer - crash - devicehealth - orchestrator_cli - progress - status - volumes available: true available_modules: - can_run: true error_string: '' module_options: password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str server_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_url see_also: [] tags: [] type: str username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_server: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_server see_also: [] tags: [] type: str name: ansible - can_run: true error_string: '' module_options: active: default_value: 'False' desc: automatically balance PGs across cluster enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: active see_also: [] tags: [] type: bool begin_time: default_value: '0000' desc: beginning time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: begin_time see_also: [] tags: [] type: str begin_weekday: default_value: '0' desc: Restrict automatic balancing to this day of the week or later enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. 
max: '7' min: '0' name: begin_weekday see_also: [] tags: [] type: uint crush_compat_max_iterations: default_value: '25' desc: maximum number of iterations to attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '250' min: '1' name: crush_compat_max_iterations see_also: [] tags: [] type: uint crush_compat_metrics: default_value: pgs,objects,bytes desc: metrics with which to calculate OSD utilization enum_allowed: [] flags: 1 level: advanced long_desc: Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization. max: '' min: '' name: crush_compat_metrics see_also: [] tags: [] type: str crush_compat_step: default_value: '0.5' desc: aggressiveness of optimization enum_allowed: [] flags: 1 level: advanced long_desc: .99 is very aggressive, .01 is less aggressive max: '0.999' min: '0.001' name: crush_compat_step see_also: [] tags: [] type: float end_time: default_value: '2400' desc: ending time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: end_time see_also: [] tags: [] type: str end_weekday: default_value: '7' desc: Restrict automatic balancing to days of the week earlier than this enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: end_weekday see_also: [] tags: [] type: uint min_score: default_value: '0' desc: minimum score, below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_score see_also: [] tags: [] type: float mode: default_value: none desc: Balancer mode enum_allowed: - crush-compat - none - upmap flags: 1 level: advanced long_desc: '' max: '' min: '' name: mode see_also: [] tags: [] type: str pool_ids: default_value: '' desc: pools which the automatic balancing will be limited to enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_ids see_also: [] tags: [] type: str sleep_interval: default_value: '60' desc: how frequently to wake up and attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs upmap_max_deviation: default_value: '0.01' desc: deviation below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: If the ratio between the fullest and least-full OSD is below this value then we stop trying to optimize placement. 
max: '1' min: '0' name: upmap_max_deviation see_also: [] tags: [] type: float upmap_max_iterations: default_value: '10' desc: maximum upmap optimization iterations enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: upmap_max_iterations see_also: [] tags: [] type: uint name: balancer - can_run: true error_string: '' module_options: {} name: crash - can_run: true error_string: '' module_options: salt_api_eauth: default_value: sharedsecret desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_eauth see_also: [] tags: [] type: str salt_api_password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_password see_also: [] tags: [] type: str salt_api_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: 
'30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] 
flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: 
default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix epoch: 5 modules: - iostat - restful services: {} standbys: - available_modules: - can_run: true error_string: '' module_options: password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str server_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_url see_also: [] tags: [] type: str username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_server: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_server see_also: [] tags: [] type: str name: ansible - can_run: true error_string: '' module_options: active: default_value: 'False' desc: automatically balance PGs across cluster enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' 
name: active see_also: [] tags: [] type: bool begin_time: default_value: '0000' desc: beginning time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: begin_time see_also: [] tags: [] type: str begin_weekday: default_value: '0' desc: Restrict automatic balancing to this day of the week or later enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: begin_weekday see_also: [] tags: [] type: uint crush_compat_max_iterations: default_value: '25' desc: maximum number of iterations to attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '250' min: '1' name: crush_compat_max_iterations see_also: [] tags: [] type: uint crush_compat_metrics: default_value: pgs,objects,bytes desc: metrics with which to calculate OSD utilization enum_allowed: [] flags: 1 level: advanced long_desc: Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization. max: '' min: '' name: crush_compat_metrics see_also: [] tags: [] type: str crush_compat_step: default_value: '0.5' desc: aggressiveness of optimization enum_allowed: [] flags: 1 level: advanced long_desc: .99 is very aggressive, .01 is less aggressive max: '0.999' min: '0.001' name: crush_compat_step see_also: [] tags: [] type: float end_time: default_value: '2400' desc: ending time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: end_time see_also: [] tags: [] type: str end_weekday: default_value: '7' desc: Restrict automatic balancing to days of the week earlier than this enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: end_weekday see_also: [] tags: [] type: uint min_score: default_value: '0' desc: minimum score, below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_score see_also: [] tags: [] type: float mode: default_value: none desc: Balancer mode enum_allowed: - crush-compat - none - upmap flags: 1 level: advanced long_desc: '' max: '' min: '' name: mode see_also: [] tags: [] type: str pool_ids: default_value: '' desc: pools which the automatic balancing will be limited to enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_ids see_also: [] tags: [] type: str sleep_interval: default_value: '60' desc: how frequently to wake up and attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs upmap_max_deviation: default_value: '0.01' desc: deviation below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: If the ratio between the fullest and least-full OSD is below this value then we stop trying to optimize placement. 
max: '1' min: '0' name: upmap_max_deviation see_also: [] tags: [] type: float upmap_max_iterations: default_value: '10' desc: maximum upmap optimization iterations enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: upmap_max_iterations see_also: [] tags: [] type: uint name: balancer - can_run: true error_string: '' module_options: {} name: crash - can_run: true error_string: '' module_options: salt_api_eauth: default_value: sharedsecret desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_eauth see_also: [] tags: [] type: str salt_api_password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_password see_also: [] tags: [] type: str salt_api_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: 
'30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] 
flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: 
default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix gid: 24118 name: e24-h19-740xd - available_modules: - can_run: true error_string: '' module_options: password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str server_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_url see_also: [] tags: [] type: str username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_server: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_server see_also: [] tags: [] type: str name: ansible - can_run: true error_string: '' module_options: active: default_value: 'False' desc: automatically balance PGs across cluster enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: active see_also: [] 
tags: [] type: bool begin_time: default_value: '0000' desc: beginning time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: begin_time see_also: [] tags: [] type: str begin_weekday: default_value: '0' desc: Restrict automatic balancing to this day of the week or later enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: begin_weekday see_also: [] tags: [] type: uint crush_compat_max_iterations: default_value: '25' desc: maximum number of iterations to attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '250' min: '1' name: crush_compat_max_iterations see_also: [] tags: [] type: uint crush_compat_metrics: default_value: pgs,objects,bytes desc: metrics with which to calculate OSD utilization enum_allowed: [] flags: 1 level: advanced long_desc: Value is a list of one or more of "pgs", "objects", or "bytes", and indicates which metrics to use to balance utilization. max: '' min: '' name: crush_compat_metrics see_also: [] tags: [] type: str crush_compat_step: default_value: '0.5' desc: aggressiveness of optimization enum_allowed: [] flags: 1 level: advanced long_desc: .99 is very aggressive, .01 is less aggressive max: '0.999' min: '0.001' name: crush_compat_step see_also: [] tags: [] type: float end_time: default_value: '2400' desc: ending time of day to automatically balance enum_allowed: [] flags: 1 level: advanced long_desc: This is a time of day in the format HHMM. max: '' min: '' name: end_time see_also: [] tags: [] type: str end_weekday: default_value: '7' desc: Restrict automatic balancing to days of the week earlier than this enum_allowed: [] flags: 1 level: advanced long_desc: 0 or 7 = Sunday, 1 = Monday, etc. max: '7' min: '0' name: end_weekday see_also: [] tags: [] type: uint min_score: default_value: '0' desc: minimum score, below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_score see_also: [] tags: [] type: float mode: default_value: none desc: Balancer mode enum_allowed: - crush-compat - none - upmap flags: 1 level: advanced long_desc: '' max: '' min: '' name: mode see_also: [] tags: [] type: str pool_ids: default_value: '' desc: pools which the automatic balancing will be limited to enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_ids see_also: [] tags: [] type: str sleep_interval: default_value: '60' desc: how frequently to wake up and attempt optimization enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs upmap_max_deviation: default_value: '0.01' desc: deviation below which no optimization is attempted enum_allowed: [] flags: 1 level: advanced long_desc: If the ratio between the fullest and least-full OSD is below this value then we stop trying to optimize placement. 
max: '1' min: '0' name: upmap_max_deviation see_also: [] tags: [] type: float upmap_max_iterations: default_value: '10' desc: maximum upmap optimization iterations enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: upmap_max_iterations see_also: [] tags: [] type: uint name: balancer - can_run: true error_string: '' module_options: {} name: crash - can_run: true error_string: '' module_options: salt_api_eauth: default_value: sharedsecret desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_eauth see_also: [] tags: [] type: str salt_api_password: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_password see_also: [] tags: [] type: str salt_api_url: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_url see_also: [] tags: [] type: str salt_api_username: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: salt_api_username see_also: [] tags: [] type: str name: deepsea - can_run: true error_string: '' module_options: enable_monitoring: default_value: 'False' desc: monitor device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: enable_monitoring see_also: [] tags: [] type: bool mark_out_threshold: default_value: '2419200' desc: automatically mark OSD if it may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: mark_out_threshold see_also: [] tags: [] type: secs pool_name: default_value: device_health_metrics desc: name of pool in which to store device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pool_name see_also: [] tags: [] type: str retention_period: default_value: '15552000' desc: how long to retain device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: retention_period see_also: [] tags: [] type: secs scrape_frequency: default_value: '86400' desc: how frequently to scrape device health metrics enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: scrape_frequency see_also: [] tags: [] type: secs self_heal: default_value: 'True' desc: preemptively heal cluster around devices that may fail enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: self_heal see_also: [] tags: [] type: bool sleep_interval: default_value: '600' desc: how frequently to wake up and check device health enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: secs warn_threshold: default_value: '7257600' desc: raise health warning if OSD may fail before this long enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: warn_threshold see_also: [] tags: [] type: secs name: devicehealth - can_run: false error_string: influxdb python module not found module_options: batch_size: default_value: '5000' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: batch_size see_also: [] tags: [] type: str database: default_value: ceph desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: database see_also: [] tags: [] type: str hostname: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: hostname see_also: [] tags: [] type: str interval: default_value: 
'30' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: str password: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: password see_also: [] tags: [] type: str port: default_value: '8086' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: port see_also: [] tags: [] type: str ssl: default_value: 'false' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: ssl see_also: [] tags: [] type: str threads: default_value: '5' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: threads see_also: [] tags: [] type: str username: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: username see_also: [] tags: [] type: str verify_ssl: default_value: 'true' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: verify_ssl see_also: [] tags: [] type: str name: influx - can_run: true error_string: '' module_options: {} name: insights - can_run: true error_string: '' module_options: {} name: iostat - can_run: true error_string: '' module_options: failure_domain: default_value: host desc: failure domain for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: failure_domain see_also: [] tags: [] type: str min_size: default_value: '' desc: default min_size for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: min_size see_also: [] tags: [] type: int num_rep: default_value: '3' desc: default replica count for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: num_rep see_also: [] tags: [] type: int pg_num: default_value: '128' desc: default pg_num for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: pg_num see_also: [] tags: [] type: int prefix: default_value: '' desc: name prefix for any created local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: prefix see_also: [] tags: [] type: str subtree: default_value: rack desc: CRUSH level for which to create a local pool enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: subtree see_also: [] tags: [] type: str name: localpool - can_run: true error_string: '' module_options: orchestrator: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: orchestrator see_also: [] tags: [] type: str name: orchestrator_cli - can_run: true error_string: '' module_options: sleep_interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: sleep_interval see_also: [] tags: [] type: str name: pg_autoscaler - can_run: true error_string: '' module_options: max_completed_events: default_value: '50' desc: number of past completed events to remember enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: max_completed_events see_also: [] tags: [] type: int persist_interval: default_value: '5' desc: how frequently to persist completed events enum_allowed: [] flags: 1 level: advanced long_desc: '' max: '' min: '' name: persist_interval see_also: [] tags: [] type: secs name: progress - can_run: true error_string: '' module_options: rbd_stats_pools: default_value: '' desc: '' enum_allowed: [] 
flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools see_also: [] tags: [] type: str rbd_stats_pools_refresh_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rbd_stats_pools_refresh_interval see_also: [] tags: [] type: str scrape_interval: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: scrape_interval see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: prometheus - can_run: true error_string: '' module_options: {} name: rbd_support - can_run: true error_string: '' module_options: key_file: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: key_file see_also: [] tags: [] type: str server_addr: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_addr see_also: [] tags: [] type: str server_port: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: server_port see_also: [] tags: [] type: str name: restful - can_run: true error_string: '' module_options: roption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption1 see_also: [] tags: [] type: str roption2: default_value: xyz desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: roption2 see_also: [] tags: [] type: str rwoption1: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption1 see_also: [] tags: [] type: str rwoption2: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption2 see_also: [] tags: [] type: int rwoption3: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption3 see_also: [] tags: [] type: float rwoption4: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption4 see_also: [] tags: [] type: str rwoption5: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption5 see_also: [] tags: [] type: bool rwoption6: default_value: 'True' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: rwoption6 see_also: [] tags: [] type: bool testkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testkey see_also: [] tags: [] type: str testlkey: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testlkey see_also: [] tags: [] type: str testnewline: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: testnewline see_also: [] tags: [] type: str name: selftest - can_run: true error_string: '' module_options: {} name: status - can_run: true error_string: '' module_options: address: default_value: unixgram:///tmp/telegraf.sock desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: address see_also: [] tags: [] type: str interval: 
default_value: '15' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs name: telegraf - can_run: true error_string: '' module_options: contact: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: contact see_also: [] tags: [] type: str description: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: description see_also: [] tags: [] type: str enabled: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: enabled see_also: [] tags: [] type: bool interval: default_value: '72' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '24' name: interval see_also: [] tags: [] type: int leaderboard: default_value: 'False' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: leaderboard see_also: [] tags: [] type: bool organization: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: organization see_also: [] tags: [] type: str proxy: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: proxy see_also: [] tags: [] type: str url: default_value: https://telemetry.ceph.com/report desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: url see_also: [] tags: [] type: str name: telemetry - can_run: true error_string: '' module_options: {} name: test_orchestrator - can_run: true error_string: '' module_options: {} name: volumes - can_run: true error_string: '' module_options: identifier: default_value: '' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: identifier see_also: [] tags: [] type: str interval: default_value: '60' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: interval see_also: [] tags: [] type: secs zabbix_host: default_value: None desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_host see_also: [] tags: [] type: str zabbix_port: default_value: '10051' desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_port see_also: [] tags: [] type: int zabbix_sender: default_value: /usr/bin/zabbix_sender desc: '' enum_allowed: [] flags: 0 level: advanced long_desc: '' max: '' min: '' name: zabbix_sender see_also: [] tags: [] type: str name: zabbix gid: 34118 name: e24-h21-740xd monmap: created: '2019-04-16 17:59:51.468270' epoch: 1 features: optional: [] persistent: - kraken - luminous - mimic - osdmap-prune - nautilus fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf min_mon_release: 14 min_mon_release_name: nautilus modified: '2019-04-16 17:59:51.468270' mons: - addr: 10.1.24.17:6789/0 name: e24-h17-740xd public_addr: 10.1.24.17:6789/0 public_addrs: addrvec: - addr: 10.1.24.17:3300 nonce: 0 type: v2 - addr: 10.1.24.17:6789 nonce: 0 type: v1 rank: 0 - addr: 10.1.24.19:6789/0 name: e24-h19-740xd public_addr: 10.1.24.19:6789/0 public_addrs: addrvec: - addr: 10.1.24.19:3300 nonce: 0 type: v2 - addr: 10.1.24.19:6789 nonce: 0 type: v1 rank: 1 - addr: 10.1.24.21:6789/0 name: e24-h21-740xd public_addr: 10.1.24.21:6789/0 public_addrs: addrvec: - addr: 10.1.24.21:3300 nonce: 0 type: v2 - addr: 10.1.24.21:6789 nonce: 0 type: v1 rank: 2 osdmap: osdmap: epoch: 17 full: false nearfull: false num_in_osds: 0 num_osds: 0 
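(Editor's note: the mgr map above repeats the same available_modules listing for each standby daemon — e24-h19-740xd and e24-h21-740xd — so only can_run/error_string differ per host; the influx module, for instance, cannot run because the influxdb python module is not installed. If particular optional mgr modules should be enabled at deploy time, ceph-ansible exposes a group_vars list for that. The snippet below is only an illustrative sketch assuming the stock ceph_mgr_modules variable; it is not taken from this run.)

  # group_vars/mgrs.yml — illustrative sketch only
  ceph_mgr_modules:
    - status
    - prometheus
    - pg_autoscaler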
num_remapped_pgs: 0 num_up_osds: 0 pgmap: bytes_avail: 0 bytes_total: 0 bytes_used: 0 data_bytes: 0 num_objects: 0 num_pgs: 0 num_pools: 0 pgs_by_state: [] progress_events: {} quorum: - 0 - 1 - 2 quorum_age: 39 quorum_names: - e24-h17-740xd - e24-h19-740xd - e24-h21-740xd servicemap: epoch: 1 modified: '0.000000' services: {} TASK [ceph-facts : set_fact fsid from ceph_current_status] ************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:91 Tuesday 16 April 2019 19:53:15 +0000 (0:00:00.998) 0:02:23.669 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: fsid: b2987ad0-ea8c-4c34-b1cc-28c50fe789cf TASK [ceph-facts : generate cluster fsid] ***************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:98 Tuesday 16 April 2019 19:53:15 +0000 (0:00:00.186) 0:02:23.855 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact fsid] ************************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:105 Tuesday 16 April 2019 19:53:15 +0000 (0:00:00.042) 0:02:23.898 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact mds_name ansible_hostname] **************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:113 Tuesday 16 April 2019 19:53:15 +0000 (0:00:00.121) 0:02:24.020 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: mds_name: e23-h05-740xd ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: mds_name: e24-h05-740xd ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: mds_name: e24-h07-740xd ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: mds_name: e23-h07-740xd TASK [ceph-facts : set_fact mds_name ansible_fqdn] 
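(Editor's note: because a cluster already exists, ceph-facts takes the fsid directly from the ceph_current_status fact and the "generate cluster fsid" task is skipped; the mds_name tasks follow the same pattern, preferring ansible_hostname unless the fqdn variant is requested. A minimal sketch of that conditional set_fact pattern, simplified and not the actual role code, with variable handling assumed:)

  - name: set fsid from an existing cluster's status (sketch)
    set_fact:
      fsid: "{{ ceph_current_status.fsid }}"
    when: ceph_current_status.fsid is defined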
******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:119 Tuesday 16 April 2019 19:53:15 +0000 (0:00:00.218) 0:02:24.238 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact rbd_client_directory_owner ceph] ********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:125 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.110) 0:02:24.349 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact rbd_client_directory_group rbd_client_directory_group] ************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:132 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.111) 0:02:24.461 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact rbd_client_directory_mode 0770] *********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:139 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.111) 0:02:24.573 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : resolve device link(s)] 
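(Editor's note: the device-related tasks that follow appear to be skipped on these OSD hosts because the inventory supplies literal /dev/nvme* block devices rather than symlinks, and osd_auto_discovery is not in use. For reference, a minimal sketch of the two pieces involved — the group_vars names are the ones ceph-ansible is assumed to use, and the readlink task only illustrates what "resolve device link(s)" does, it is not the role's exact code:)

  # group_vars/osds.yml — illustrative, device names taken from the facts below
  osd_auto_discovery: false
  devices:
    - /dev/nvme0n1
    - /dev/nvme1n1
    - /dev/nvme2n1
    - /dev/nvme3n1
    - /dev/nvme4n1

  # resolving any symlinked device paths to their real targets (sketch)
  - name: resolve device link(s)
    command: readlink -f {{ item }}
    changed_when: false
    loop: "{{ devices }}"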
**************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:146 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.112) 0:02:24.685 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false item: /dev/nvme0n1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false item: /dev/nvme1n1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false item: /dev/nvme2n1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false item: /dev/nvme0n1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false item: /dev/nvme3n1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false item: /dev/nvme1n1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false item: /dev/nvme4n1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false item: /dev/nvme2n1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false item: /dev/nvme0n1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false item: /dev/nvme3n1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false item: /dev/nvme1n1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false item: /dev/nvme4n1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false item: /dev/nvme2n1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false item: /dev/nvme3n1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false item: /dev/nvme0n1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false item: /dev/nvme4n1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false item: /dev/nvme1n1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false item: /dev/nvme2n1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false item: /dev/nvme3n1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false item: /dev/nvme4n1 skip_reason: Conditional result was False TASK [ceph-facts : set_fact build devices from 
resolved symlinks] ***************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:157 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.165) 0:02:24.851 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme0n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme0n1'}) => changed=false item: changed: false item: /dev/nvme0n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme1n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme1n1'}) => changed=false item: changed: false item: /dev/nvme1n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme2n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme2n1'}) => changed=false item: changed: false item: /dev/nvme2n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme0n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme0n1'}) => changed=false item: changed: false item: /dev/nvme0n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme3n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme3n1'}) => changed=false item: changed: false item: /dev/nvme3n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme1n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme1n1'}) => changed=false item: changed: false item: /dev/nvme1n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme4n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme4n1'}) => changed=false item: changed: false item: /dev/nvme4n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False 
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme2n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme2n1'}) => changed=false item: changed: false item: /dev/nvme2n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme0n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme0n1'}) => changed=false item: changed: false item: /dev/nvme0n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme3n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme3n1'}) => changed=false item: changed: false item: /dev/nvme3n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme1n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme1n1'}) => changed=false item: changed: false item: /dev/nvme1n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme4n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme4n1'}) => changed=false item: changed: false item: /dev/nvme4n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme2n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme2n1'}) => changed=false item: changed: false item: /dev/nvme2n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme3n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme3n1'}) => changed=false item: changed: false item: /dev/nvme3n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme0n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme0n1'}) => changed=false item: changed: false item: /dev/nvme0n1 skip_reason: 
Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme4n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme4n1'}) => changed=false item: changed: false item: /dev/nvme4n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme1n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme1n1'}) => changed=false item: changed: false item: /dev/nvme1n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme2n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme2n1'}) => changed=false item: changed: false item: /dev/nvme2n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme3n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme3n1'}) => changed=false item: changed: false item: /dev/nvme3n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'changed': False, 'skipped': True, 'skip_reason': 'Conditional result was False', '_ansible_no_log': False, 'item': '/dev/nvme4n1', '_ansible_item_result': True, '_ansible_ignore_errors': None, '_ansible_item_label': '/dev/nvme4n1'}) => changed=false item: changed: false item: /dev/nvme4n1 skip_reason: Conditional result was False skipped: true skip_reason: Conditional result was False TASK [ceph-facts : set_fact build final devices list] ***************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:167 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.167) 0:02:25.018 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact devices generate device list when osd_auto_discovery] ************************************************************************************************************************************************************************************* task path: 
/usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:176 Tuesday 16 April 2019 19:53:16 +0000 (0:00:00.118) 0:02:25.137 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e23--h05--740xd-swap', 'dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9Fa83be0foapGdZcNHwF978WCMHOQk80SKs'], 'uuids': ['03ff6531-787c-4726-9a1b-512865e75946'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00ae8b83b86e5aeb2200f23a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '8388608', 'sectorsize': '512', 'size': '4.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-1 value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h05--740xd-swap - dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9Fa83be0foapGdZcNHwF978WCMHOQk80SKs labels: [] masters: [] uuids: - 03ff6531-787c-4726-9a1b-512865e75946 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '8388608' sectorsize: '512' serial: 00ae8b83b86e5aeb2200f23a59604609 size: 4.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200360', 'nvme-eui.333959304b2003600025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme0n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200360 - nvme-eui.333959304b2003600025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme3n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200362', 'nvme-eui.333959304b2003620025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme3n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200362 
- nvme-eui.333959304b2003620025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme2n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200282', 'nvme-eui.333959304b2002820025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme2n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200282 - nvme-eui.333959304b2002820025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e23--h05--740xd-home', 'dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9Fa5PK1Zq33SMazG3hggYXzKwiw9BjHuWgL'], 'uuids': ['362124e7-e989-488e-8b01-3daba39b8369'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00ae8b83b86e5aeb2200f23a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '821288960', 'sectorsize': '512', 'size': '391.62 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-2 value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h05--740xd-home - dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9Fa5PK1Zq33SMazG3hggYXzKwiw9BjHuWgL labels: [] masters: [] uuids: - 362124e7-e989-488e-8b01-3daba39b8369 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '821288960' sectorsize: '512' serial: 00ae8b83b86e5aeb2200f23a59604609 size: 391.62 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e23--h05--740xd-root', 'dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9FaKvat9iUehrITIvmT0PHS5t1YPy4KOMpe'], 'uuids': ['79f2e03c-d36f-4635-b497-3c2ae0301d40'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00ae8b83b86e5aeb2200f23a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '104857600', 'sectorsize': '512', 'size': '50.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-0 
value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h05--740xd-root - dm-uuid-LVM-KTm1KdKUKOHDkJWzRCj3Q0v0SM3mE9FaKvat9iUehrITIvmT0PHS5t1YPy4KOMpe labels: [] masters: [] uuids: - 79f2e03c-d36f-4635-b497-3c2ae0301d40 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '104857600' sectorsize: '512' serial: 00ae8b83b86e5aeb2200f23a59604609 size: 50.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h05--740xd-swap', 'dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcTQmdGo1ldOz1ygNvi86HxLU6fHmjNLRe6'], 'uuids': ['1e952bda-f45d-4bfa-81aa-3ae675448a73'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00a102440ca75eeb2200df3a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '8388608', 'sectorsize': '512', 'size': '4.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-1 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h05--740xd-swap - dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcTQmdGo1ldOz1ygNvi86HxLU6fHmjNLRe6 labels: [] masters: [] uuids: - 1e952bda-f45d-4bfa-81aa-3ae675448a73 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '8388608' sectorsize: '512' serial: 00a102440ca75eeb2200df3a59604609 size: 4.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200359', 'nvme-eui.333959304b2003590025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme0n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200359 - nvme-eui.333959304b2003590025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme1n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200364', 'nvme-eui.333959304b2003640025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': 
'512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme1n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200364 - nvme-eui.333959304b2003640025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme3n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200272', 'nvme-eui.333959304b2002720025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme3n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200272 - nvme-eui.333959304b2002720025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['scsi-36d094660593af20022eb5a6eb8838bae', 'wwn-0x6d094660593af20022eb5a6eb8838bae'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'DELL', 'model': 'PERC H740P Adp', 'sas_address': None, 'sas_device_handle': None, 'serial': '00ae8b83b86e5aeb2200f23a59604609', 'removable': '0', 'support_discard': '0', 'wwn': '0x6d094660593af20022eb5a6eb8838bae', 'partitions': {'sda2': {'links': {'ids': ['lvm-pv-uuid-PTcBBO-UL1Q-VhdP-0HcP-47lM-u0co-JqtHmg', 'scsi-36d094660593af20022eb5a6eb8838bae-part2', 'wwn-0x6d094660593af20022eb5a6eb8838bae-part2'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2']}, 'start': '2099200', 'sectors': '934541312', 'sectorsize': 512, 'size': '445.62 GB', 'uuid': None, 'holders': ['rhel_e23--h05--740xd-swap', 'rhel_e23--h05--740xd-home', 'rhel_e23--h05--740xd-root']}, 'sda1': {'links': {'ids': ['scsi-36d094660593af20022eb5a6eb8838bae-part1', 'wwn-0x6d094660593af20022eb5a6eb8838bae-part1'], 'uuids': ['e86c903a-fea6-481e-92fe-076925e24cd2'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '2097152', 'sectorsize': 512, 'size': '1.00 GB', 'uuid': 'e86c903a-fea6-481e-92fe-076925e24cd2', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '936640512', 'sectorsize': '512', 'size': '446.62 GB', 'host': 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 
01)', 'holders': []}}) => changed=false item: key: sda value: holders: [] host: 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)' links: ids: - scsi-36d094660593af20022eb5a6eb8838bae - wwn-0x6d094660593af20022eb5a6eb8838bae labels: [] masters: [] uuids: [] model: PERC H740P Adp partitions: sda1: holders: [] links: ids: - scsi-36d094660593af20022eb5a6eb8838bae-part1 - wwn-0x6d094660593af20022eb5a6eb8838bae-part1 labels: [] masters: [] uuids: - e86c903a-fea6-481e-92fe-076925e24cd2 sectors: '2097152' sectorsize: 512 size: 1.00 GB start: '2048' uuid: e86c903a-fea6-481e-92fe-076925e24cd2 sda2: holders: - rhel_e23--h05--740xd-swap - rhel_e23--h05--740xd-home - rhel_e23--h05--740xd-root links: ids: - lvm-pv-uuid-PTcBBO-UL1Q-VhdP-0HcP-47lM-u0co-JqtHmg - scsi-36d094660593af20022eb5a6eb8838bae-part2 - wwn-0x6d094660593af20022eb5a6eb8838bae-part2 labels: [] masters: - dm-0 - dm-1 - dm-2 uuids: [] sectors: '934541312' sectorsize: 512 size: 445.62 GB start: '2099200' uuid: null removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '936640512' sectorsize: '512' serial: 00ae8b83b86e5aeb2200f23a59604609 size: 446.62 GB support_discard: '0' vendor: DELL virtual: 1 wwn: '0x6d094660593af20022eb5a6eb8838bae' skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme2n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200508', 'nvme-eui.333959304b2005080025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme2n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200508 - nvme-eui.333959304b2005080025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme4n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200507', 'nvme-eui.333959304b2005070025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme4n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - 
nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200507 - nvme-eui.333959304b2005070025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h05--740xd-home', 'dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcTCzL5hoLlhIMI3q9fXNeAvpXXToifhb9e'], 'uuids': ['f7e5a509-aeb2-4063-8bf7-ecfd7f6b16ca'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00a102440ca75eeb2200df3a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '821288960', 'sectorsize': '512', 'size': '391.62 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-2 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h05--740xd-home - dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcTCzL5hoLlhIMI3q9fXNeAvpXXToifhb9e labels: [] masters: [] uuids: - f7e5a509-aeb2-4063-8bf7-ecfd7f6b16ca model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '821288960' sectorsize: '512' serial: 00a102440ca75eeb2200df3a59604609 size: 391.62 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h05--740xd-root', 'dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcT7LGFd6Nqa90j4sWKXPJJMbw35TY39I0d'], 'uuids': ['c5b5e389-0999-493f-b155-e151c31dcb8a'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '00a102440ca75eeb2200df3a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '104857600', 'sectorsize': '512', 'size': '50.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-0 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h05--740xd-root - dm-uuid-LVM-K0UgaB7rYJjfeF0StounUfKCI3U2RGcT7LGFd6Nqa90j4sWKXPJJMbw35TY39I0d labels: [] masters: [] uuids: - c5b5e389-0999-493f-b155-e151c31dcb8a model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '104857600' sectorsize: '512' serial: 00a102440ca75eeb2200df3a59604609 size: 50.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme1n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200184', 'nvme-eui.333959304b2001840025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: 
nvme1n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200184 - nvme-eui.333959304b2001840025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h07--740xd-swap', 'dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeIClrAmzXC17xxe0ZtRbVDkjGCvjSWeoaI87'], 'uuids': ['5b641115-cd74-4ef7-9fcc-f8d097d98265'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0062e83904a95eeb2200d5765b604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '8388608', 'sectorsize': '512', 'size': '4.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-1 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h07--740xd-swap - dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeIClrAmzXC17xxe0ZtRbVDkjGCvjSWeoaI87 labels: [] masters: [] uuids: - 5b641115-cd74-4ef7-9fcc-f8d097d98265 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '8388608' sectorsize: '512' serial: 0062e83904a95eeb2200d5765b604609 size: 4.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['scsi-36d094660593adf0022eb5ea70c4402a1', 'wwn-0x6d094660593adf0022eb5ea70c4402a1'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'DELL', 'model': 'PERC H740P Adp', 'sas_address': None, 'sas_device_handle': None, 'serial': '00a102440ca75eeb2200df3a59604609', 'removable': '0', 'support_discard': '0', 'wwn': '0x6d094660593adf0022eb5ea70c4402a1', 'partitions': {'sda2': {'links': {'ids': ['lvm-pv-uuid-1AfcRD-xdYt-QtJX-x7wd-MLCO-jTLY-pmebhq', 'scsi-36d094660593adf0022eb5ea70c4402a1-part2', 'wwn-0x6d094660593adf0022eb5ea70c4402a1-part2'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2']}, 'start': '2099200', 'sectors': '934541312', 'sectorsize': 512, 'size': '445.62 GB', 'uuid': None, 'holders': ['rhel_e24--h05--740xd-swap', 'rhel_e24--h05--740xd-home', 'rhel_e24--h05--740xd-root']}, 'sda1': {'links': {'ids': ['scsi-36d094660593adf0022eb5ea70c4402a1-part1', 'wwn-0x6d094660593adf0022eb5ea70c4402a1-part1'], 'uuids': ['d1e690f1-6f87-41f5-b6a6-5b7d85ad1a65'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '2097152', 'sectorsize': 512, 'size': '1.00 GB', 'uuid': 'd1e690f1-6f87-41f5-b6a6-5b7d85ad1a65', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '936640512', 'sectorsize': '512', 'size': '446.62 GB', 'host': 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)', 'holders': []}}) => changed=false item: key: sda value: holders: [] host: 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)' links: ids: - scsi-36d094660593adf0022eb5ea70c4402a1 - wwn-0x6d094660593adf0022eb5ea70c4402a1 labels: [] masters: [] uuids: [] 
model: PERC H740P Adp partitions: sda1: holders: [] links: ids: - scsi-36d094660593adf0022eb5ea70c4402a1-part1 - wwn-0x6d094660593adf0022eb5ea70c4402a1-part1 labels: [] masters: [] uuids: - d1e690f1-6f87-41f5-b6a6-5b7d85ad1a65 sectors: '2097152' sectorsize: 512 size: 1.00 GB start: '2048' uuid: d1e690f1-6f87-41f5-b6a6-5b7d85ad1a65 sda2: holders: - rhel_e24--h05--740xd-swap - rhel_e24--h05--740xd-home - rhel_e24--h05--740xd-root links: ids: - lvm-pv-uuid-1AfcRD-xdYt-QtJX-x7wd-MLCO-jTLY-pmebhq - scsi-36d094660593adf0022eb5ea70c4402a1-part2 - wwn-0x6d094660593adf0022eb5ea70c4402a1-part2 labels: [] masters: - dm-0 - dm-1 - dm-2 uuids: [] sectors: '934541312' sectorsize: 512 size: 445.62 GB start: '2099200' uuid: null removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '936640512' sectorsize: '512' serial: 00a102440ca75eeb2200df3a59604609 size: 446.62 GB support_discard: '0' vendor: DELL virtual: 1 wwn: '0x6d094660593adf0022eb5ea70c4402a1' skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200496', 'nvme-eui.333959304b2004960025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme0n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200496 - nvme-eui.333959304b2004960025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme4n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200499', 'nvme-eui.333959304b2004990025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme4n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200499 - nvme-eui.333959304b2004990025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 
745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme3n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200276', 'nvme-eui.333959304b2002760025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme3n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200276 - nvme-eui.333959304b2002760025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme2n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200365', 'nvme-eui.333959304b2003650025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme2n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200365 - nvme-eui.333959304b2003650025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h07--740xd-home', 'dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeIClYRW4ftQfpYhgZonGGkw46yFCmFDgGBAz'], 'uuids': ['51094b30-1ac0-45b3-bc23-7c0889e38701'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0062e83904a95eeb2200d5765b604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '821288960', 'sectorsize': '512', 'size': '391.62 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-2 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h07--740xd-home - dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeIClYRW4ftQfpYhgZonGGkw46yFCmFDgGBAz labels: [] masters: [] 
uuids: - 51094b30-1ac0-45b3-bc23-7c0889e38701 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '821288960' sectorsize: '512' serial: 0062e83904a95eeb2200d5765b604609 size: 391.62 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e24--h07--740xd-root', 'dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeICl12R31efJRMutewOtj2XjIaCCg8yJIKJz'], 'uuids': ['2635fa18-fa94-4a67-9962-593817cee965'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0062e83904a95eeb2200d5765b604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '104857600', 'sectorsize': '512', 'size': '50.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-0 value: holders: [] host: '' links: ids: - dm-name-rhel_e24--h07--740xd-root - dm-uuid-LVM-442cDUgf2SsZ1kDybMX9YLoVrgtZeICl12R31efJRMutewOtj2XjIaCCg8yJIKJz labels: [] masters: [] uuids: - 2635fa18-fa94-4a67-9962-593817cee965 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '104857600' sectorsize: '512' serial: 0062e83904a95eeb2200d5765b604609 size: 50.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme1n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200278', 'nvme-eui.333959304b2002780025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme1n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200278 - nvme-eui.333959304b2002780025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['scsi-36d0946605b76d50022eb5ea90439e862', 'wwn-0x6d0946605b76d50022eb5ea90439e862'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'DELL', 'model': 'PERC H740P Adp', 'sas_address': None, 'sas_device_handle': None, 'serial': '0062e83904a95eeb2200d5765b604609', 'removable': '0', 'support_discard': '0', 'wwn': '0x6d0946605b76d50022eb5ea90439e862', 'partitions': {'sda2': {'links': {'ids': ['lvm-pv-uuid-1ediqi-Tc5a-2oA0-ivfh-2v0F-vCWC-dIrYBq', 'scsi-36d0946605b76d50022eb5ea90439e862-part2', 'wwn-0x6d0946605b76d50022eb5ea90439e862-part2'], 'uuids': [], 'labels': [], 
'masters': ['dm-0', 'dm-1', 'dm-2']}, 'start': '2099200', 'sectors': '934541312', 'sectorsize': 512, 'size': '445.62 GB', 'uuid': None, 'holders': ['rhel_e24--h07--740xd-swap', 'rhel_e24--h07--740xd-home', 'rhel_e24--h07--740xd-root']}, 'sda1': {'links': {'ids': ['scsi-36d0946605b76d50022eb5ea90439e862-part1', 'wwn-0x6d0946605b76d50022eb5ea90439e862-part1'], 'uuids': ['2caa87d9-91ef-4c5f-9a61-376be182a2e2'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '2097152', 'sectorsize': 512, 'size': '1.00 GB', 'uuid': '2caa87d9-91ef-4c5f-9a61-376be182a2e2', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '936640512', 'sectorsize': '512', 'size': '446.62 GB', 'host': 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)', 'holders': []}}) => changed=false item: key: sda value: holders: [] host: 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)' links: ids: - scsi-36d0946605b76d50022eb5ea90439e862 - wwn-0x6d0946605b76d50022eb5ea90439e862 labels: [] masters: [] uuids: [] model: PERC H740P Adp partitions: sda1: holders: [] links: ids: - scsi-36d0946605b76d50022eb5ea90439e862-part1 - wwn-0x6d0946605b76d50022eb5ea90439e862-part1 labels: [] masters: [] uuids: - 2caa87d9-91ef-4c5f-9a61-376be182a2e2 sectors: '2097152' sectorsize: 512 size: 1.00 GB start: '2048' uuid: 2caa87d9-91ef-4c5f-9a61-376be182a2e2 sda2: holders: - rhel_e24--h07--740xd-swap - rhel_e24--h07--740xd-home - rhel_e24--h07--740xd-root links: ids: - lvm-pv-uuid-1ediqi-Tc5a-2oA0-ivfh-2v0F-vCWC-dIrYBq - scsi-36d0946605b76d50022eb5ea90439e862-part2 - wwn-0x6d0946605b76d50022eb5ea90439e862-part2 labels: [] masters: - dm-0 - dm-1 - dm-2 uuids: [] sectors: '934541312' sectorsize: 512 size: 445.62 GB start: '2099200' uuid: null removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '936640512' sectorsize: '512' serial: 0062e83904a95eeb2200d5765b604609 size: 446.62 GB support_discard: '0' vendor: DELL virtual: 1 wwn: '0x6d0946605b76d50022eb5ea90439e862' skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme4n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200366', 'nvme-eui.333959304b2003660025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme4n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200366 - nvme-eui.333959304b2003660025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-1', 'value': {'virtual': 1, 'links': {'ids': 
['dm-name-rhel_e23--h07--740xd-swap', 'dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRdjpmiBngf9SYx0OWdmismG84cF1vC1Ks'], 'uuids': ['6db506e9-a725-467e-b0bf-704339ade05f'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0018b87c0ba85eeb2200c53a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '8388608', 'sectorsize': '512', 'size': '4.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-1 value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h07--740xd-swap - dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRdjpmiBngf9SYx0OWdmismG84cF1vC1Ks labels: [] masters: [] uuids: - 6db506e9-a725-467e-b0bf-704339ade05f model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '8388608' sectorsize: '512' serial: 0018b87c0ba85eeb2200c53a59604609 size: 4.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme0n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200367', 'nvme-eui.333959304b2003670025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme0n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200367 - nvme-eui.333959304b2003670025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme3n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200245', 'nvme-eui.333959304b2002450025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme3n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200245 - nvme-eui.333959304b2002450025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' 
sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme2n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200275', 'nvme-eui.333959304b2002750025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme2n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200275 - nvme-eui.333959304b2002750025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-2', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e23--h07--740xd-home', 'dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRr9FJKliDez5cqadzztq4A514O1iXZax5'], 'uuids': ['fc0453c8-713d-4c27-a5d3-653d138ab5b6'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0018b87c0ba85eeb2200c53a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '821288960', 'sectorsize': '512', 'size': '391.62 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-2 value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h07--740xd-home - dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRr9FJKliDez5cqadzztq4A514O1iXZax5 labels: [] masters: [] uuids: - fc0453c8-713d-4c27-a5d3-653d138ab5b6 model: null partitions: {} removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '821288960' sectorsize: '512' serial: 0018b87c0ba85eeb2200c53a59604609 size: 391.62 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'dm-0', 'value': {'virtual': 1, 'links': {'ids': ['dm-name-rhel_e23--h07--740xd-root', 'dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRwsP4gO968B2Xg2efVa2YFsjoaJFX2Is0'], 'uuids': ['b5da91df-d190-4b3f-8d21-696e4036d8a8'], 'labels': [], 'masters': []}, 'vendor': None, 'model': None, 'sas_address': None, 'sas_device_handle': None, 'serial': '0018b87c0ba85eeb2200c53a59604609', 'removable': '0', 'support_discard': '0', 'partitions': {}, 'rotational': '1', 'scheduler_mode': '', 'sectors': '104857600', 'sectorsize': '512', 'size': '50.00 GB', 'host': '', 'holders': []}}) => changed=false item: key: dm-0 value: holders: [] host: '' links: ids: - dm-name-rhel_e23--h07--740xd-root - dm-uuid-LVM-3yaE0F4d0F9qdNjMMWBtY2LMI24hF8gRwsP4gO968B2Xg2efVa2YFsjoaJFX2Is0 labels: [] masters: [] uuids: - b5da91df-d190-4b3f-8d21-696e4036d8a8 model: null partitions: {} 
removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: '' sectors: '104857600' sectorsize: '512' serial: 0018b87c0ba85eeb2200c53a59604609 size: 50.00 GB support_discard: '0' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme1n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200372', 'nvme-eui.333959304b2003720025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme1n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200372 - nvme-eui.333959304b2003720025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'sda', 'value': {'virtual': 1, 'links': {'ids': ['scsi-36d094660593ac50022eb5ea80b7cb818', 'wwn-0x6d094660593ac50022eb5ea80b7cb818'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': 'DELL', 'model': 'PERC H740P Adp', 'sas_address': None, 'sas_device_handle': None, 'serial': '0018b87c0ba85eeb2200c53a59604609', 'removable': '0', 'support_discard': '0', 'wwn': '0x6d094660593ac50022eb5ea80b7cb818', 'partitions': {'sda2': {'links': {'ids': ['lvm-pv-uuid-t9kfnk-Wanh-hRO0-o0BW-pyfK-o3kJ-C0TwO1', 'scsi-36d094660593ac50022eb5ea80b7cb818-part2', 'wwn-0x6d094660593ac50022eb5ea80b7cb818-part2'], 'uuids': [], 'labels': [], 'masters': ['dm-0', 'dm-1', 'dm-2']}, 'start': '2099200', 'sectors': '934541312', 'sectorsize': 512, 'size': '445.62 GB', 'uuid': None, 'holders': ['rhel_e23--h07--740xd-swap', 'rhel_e23--h07--740xd-home', 'rhel_e23--h07--740xd-root']}, 'sda1': {'links': {'ids': ['scsi-36d094660593ac50022eb5ea80b7cb818-part1', 'wwn-0x6d094660593ac50022eb5ea80b7cb818-part1'], 'uuids': ['fca62056-4ead-4b46-a6ed-ddeee9dbccf8'], 'labels': [], 'masters': []}, 'start': '2048', 'sectors': '2097152', 'sectorsize': 512, 'size': '1.00 GB', 'uuid': 'fca62056-4ead-4b46-a6ed-ddeee9dbccf8', 'holders': []}}, 'rotational': '1', 'scheduler_mode': 'mq-deadline', 'sectors': '936640512', 'sectorsize': '512', 'size': '446.62 GB', 'host': 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)', 'holders': []}}) => changed=false item: key: sda value: holders: [] host: 'RAID bus controller: LSI Logic / Symbios Logic MegaRAID Tri-Mode SAS3508 (rev 01)' links: ids: - scsi-36d094660593ac50022eb5ea80b7cb818 - wwn-0x6d094660593ac50022eb5ea80b7cb818 labels: [] masters: [] uuids: [] model: PERC H740P Adp partitions: sda1: holders: [] links: ids: - scsi-36d094660593ac50022eb5ea80b7cb818-part1 - wwn-0x6d094660593ac50022eb5ea80b7cb818-part1 labels: [] masters: [] uuids: - 
fca62056-4ead-4b46-a6ed-ddeee9dbccf8 sectors: '2097152' sectorsize: 512 size: 1.00 GB start: '2048' uuid: fca62056-4ead-4b46-a6ed-ddeee9dbccf8 sda2: holders: - rhel_e23--h07--740xd-swap - rhel_e23--h07--740xd-home - rhel_e23--h07--740xd-root links: ids: - lvm-pv-uuid-t9kfnk-Wanh-hRO0-o0BW-pyfK-o3kJ-C0TwO1 - scsi-36d094660593ac50022eb5ea80b7cb818-part2 - wwn-0x6d094660593ac50022eb5ea80b7cb818-part2 labels: [] masters: - dm-0 - dm-1 - dm-2 uuids: [] sectors: '934541312' sectorsize: 512 size: 445.62 GB start: '2099200' uuid: null removable: '0' rotational: '1' sas_address: null sas_device_handle: null scheduler_mode: mq-deadline sectors: '936640512' sectorsize: '512' serial: 0018b87c0ba85eeb2200c53a59604609 size: 446.62 GB support_discard: '0' vendor: DELL virtual: 1 wwn: '0x6d094660593ac50022eb5ea80b7cb818' skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'key': 'nvme4n1', 'value': {'virtual': 1, 'links': {'ids': ['nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200238', 'nvme-eui.333959304b2002380025385800000002'], 'uuids': [], 'labels': [], 'masters': []}, 'vendor': None, 'model': 'Dell Express Flash PM1725a 800GB SFF', 'sas_address': None, 'sas_device_handle': None, 'removable': '0', 'support_discard': '512', 'partitions': {}, 'rotational': '0', 'scheduler_mode': 'none', 'sectors': '1562824368', 'sectorsize': '512', 'size': '745.21 GB', 'host': 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)', 'holders': []}}) => changed=false item: key: nvme4n1 value: holders: [] host: 'Non-Volatile memory controller: Samsung Electronics Co Ltd NVMe SSD Controller 172Xa/172Xb (rev 01)' links: ids: - nvme-Dell_Express_Flash_PM1725a_800GB_SFF__S39YNX0K200238 - nvme-eui.333959304b2002380025385800000002 labels: [] masters: [] uuids: [] model: Dell Express Flash PM1725a 800GB SFF partitions: {} removable: '0' rotational: '0' sas_address: null sas_device_handle: null scheduler_mode: none sectors: '1562824368' sectorsize: '512' size: 745.21 GB support_discard: '512' vendor: null virtual: 1 skip_reason: Conditional result was False TASK [ceph-facts : set_fact ceph_uid for debian based system - non container] ***************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:189 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.202) 0:02:25.339 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact ceph_uid for red hat or suse based system - non container] ******************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:196 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.114) 0:02:25.454 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_uid: 167 ok: 
[e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_uid: 167 TASK [ceph-facts : set_fact ceph_uid for debian based system - container] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:203 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.129) 0:02:25.584 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact ceph_uid for red hat based system - container] ******************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:210 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.110) 0:02:25.694 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact ceph_uid for red hat] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:218 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.109) 0:02:25.804 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact rgw_hostname] ***************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:225 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.113) 0:02:25.917 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: 
[e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact osd_pool_default_pg_num] ****************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:240 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.113) 0:02:26.031 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_pg_num: '8' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_pg_num: '8' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_pg_num: '8' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_pg_num: '8' TASK [ceph-facts : set_fact osd_pool_default_size] ******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:244 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.126) 0:02:26.158 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_size: '3' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_size: '3' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_size: '3' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_size: '3' TASK [ceph-facts : set_fact osd_pool_default_min_size] **************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:248 Tuesday 16 April 2019 19:53:17 +0000 (0:00:00.126) 0:02:26.285 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_min_size: '0' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_min_size: '0' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_min_size: '0' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_min_size: '0' TASK [ceph-facts : check if the ceph conf exists] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:252 Tuesday 16 April 2019 19:53:18 +0000 (0:00:00.131) 0:02:26.416 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false stat: atime: 1555437690.8380513 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 330237fdc57b6c989fa69999b1ccc4fb48a98e8d ctime: 1555437669.2284222 dev: 64768 device_type: 0 executable: false exists: true gid: 167 gr_name: ceph inode: 92412950 isblk: false ischr: 
false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0644' mtime: 1555437669.1134188 nlink: 1 path: /etc/ceph/ceph.conf pw_name: ceph readable: true rgrp: true roth: true rusr: true size: 416 uid: 167 version: '3761933827' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false stat: atime: 1555437690.8775265 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 330237fdc57b6c989fa69999b1ccc4fb48a98e8d ctime: 1555437669.2488 dev: 64768 device_type: 0 executable: false exists: true gid: 167 gr_name: ceph inode: 117517505 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0644' mtime: 1555437669.1307962 nlink: 1 path: /etc/ceph/ceph.conf pw_name: ceph readable: true rgrp: true roth: true rusr: true size: 416 uid: 167 version: '49894912' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false stat: atime: 1555437690.8943279 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 330237fdc57b6c989fa69999b1ccc4fb48a98e8d ctime: 1555437669.2726307 dev: 64768 device_type: 0 executable: false exists: true gid: 167 gr_name: ceph inode: 92417050 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0644' mtime: 1555437669.164627 nlink: 1 path: /etc/ceph/ceph.conf pw_name: ceph readable: true rgrp: true roth: true rusr: true size: 416 uid: 167 version: '492643242' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false stat: atime: 1555437690.9213684 attr_flags: '' attributes: [] block_size: 4096 blocks: 8 charset: us-ascii checksum: 330237fdc57b6c989fa69999b1ccc4fb48a98e8d ctime: 1555437669.2947676 dev: 64768 device_type: 0 executable: false exists: true gid: 167 gr_name: ceph inode: 92411413 isblk: false ischr: false isdir: false isfifo: false isgid: false islnk: false isreg: true issock: false isuid: false mimetype: text/plain mode: '0644' mtime: 1555437669.1847646 nlink: 1 path: /etc/ceph/ceph.conf pw_name: ceph readable: true rgrp: true roth: true rusr: true size: 416 uid: 167 version: '2754227389' wgrp: false woth: false writeable: true wusr: true xgrp: false xoth: false xusr: false TASK [ceph-facts : get default crush rule value from ceph configuration] ********************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:257 Tuesday 16 April 2019 19:53:18 +0000 (0:00:00.268) 0:02:26.685 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - grep - osd pool default crush rule - /etc/ceph/ceph.conf delta: '0:00:00.001736' end: '2019-04-16 19:53:18.509081' failed_when_result: false msg: non-zero return code rc: 1 start: '2019-04-16 19:53:18.507345' stderr: '' stderr_lines: [] stdout: '' stdout_lines: ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - grep - osd pool default crush rule - /etc/ceph/ceph.conf delta: '0:00:00.001778' end: '2019-04-16 
19:53:18.539371' failed_when_result: false msg: non-zero return code rc: 1 start: '2019-04-16 19:53:18.537593' stderr: '' stderr_lines: [] stdout: '' stdout_lines: ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - grep - osd pool default crush rule - /etc/ceph/ceph.conf delta: '0:00:00.001765' end: '2019-04-16 19:53:18.563487' failed_when_result: false msg: non-zero return code rc: 1 start: '2019-04-16 19:53:18.561722' stderr: '' stderr_lines: [] stdout: '' stdout_lines: ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - grep - osd pool default crush rule - /etc/ceph/ceph.conf delta: '0:00:00.001677' end: '2019-04-16 19:53:18.593089' failed_when_result: false msg: non-zero return code rc: 1 start: '2019-04-16 19:53:18.591412' stderr: '' stderr_lines: [] stdout: '' stdout_lines: TASK [ceph-facts : set_fact osd_pool_default_crush_rule] ************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:264 Tuesday 16 April 2019 19:53:18 +0000 (0:00:00.262) 0:02:26.947 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_crush_rule: '-1' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_crush_rule: '-1' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_crush_rule: '-1' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: osd_pool_default_crush_rule: '-1' TASK [ceph-facts : set_fact _monitor_address to monitor_address_block] ************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_monitor_address.yml:2 Tuesday 16 April 2019 19:53:18 +0000 (0:00:00.132) 0:02:27.080 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) 
=> changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False TASK [ceph-facts : set_fact _monitor_address to monitor_address] ****************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_monitor_address.yml:12 Tuesday 16 April 2019 19:53:18 +0000 (0:00:00.135) 0:02:27.215 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com 
skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False TASK [ceph-facts : set_fact _monitor_address to monitor_interface - ipv4] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_monitor_address.yml:22 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.132) 0:02:27.348 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: 
[e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False TASK [ceph-facts : set_fact _monitor_address to monitor_interface - ipv6] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_monitor_address.yml:34 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.133) 0:02:27.482 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h17-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h19-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=e24-h21-740xd.alias.bos.scalelab.redhat.com) => changed=false item: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False TASK [ceph-facts : set_fact _current_monitor_address] ***************************************************************************************************************************************************************************************************************** task path: 
/usr/share/ceph-ansible/roles/ceph-facts/tasks/set_monitor_address.yml:46 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.134) 0:02:27.616 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h17-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.17'}) => changed=false item: addr: 10.1.24.17 name: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h19-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.19'}) => changed=false item: addr: 10.1.24.19 name: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h21-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.21'}) => changed=false item: addr: 10.1.24.21 name: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h17-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.17'}) => changed=false item: addr: 10.1.24.17 name: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h19-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.19'}) => changed=false item: addr: 10.1.24.19 name: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h21-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.21'}) => changed=false item: addr: 10.1.24.21 name: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h17-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.17'}) => changed=false item: addr: 10.1.24.17 name: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h19-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.19'}) => changed=false item: addr: 10.1.24.19 name: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h21-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.21'}) => changed=false item: addr: 10.1.24.21 name: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h17-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.17'}) => changed=false item: addr: 10.1.24.17 name: e24-h17-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h19-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.19'}) => changed=false item: addr: 10.1.24.19 name: e24-h19-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'e24-h21-740xd.alias.bos.scalelab.redhat.com', 'addr': '10.1.24.21'}) => changed=false item: addr: 10.1.24.21 name: e24-h21-740xd.alias.bos.scalelab.redhat.com skip_reason: Conditional result was False TASK [ceph-facts : set_fact _radosgw_address to radosgw_address_block] 
************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_radosgw_address.yml:2 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.138) 0:02:27.754 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact _radosgw_address to radosgw_address] ****************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_radosgw_address.yml:9 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.117) 0:02:27.872 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact _interface] ******************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_radosgw_address.yml:17 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.114) 0:02:27.987 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact _radosgw_address to radosgw_interface - ipv4] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_radosgw_address.yml:21 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.112) 0:02:28.099 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact _radosgw_address to radosgw_interface - ipv6] 
********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/set_radosgw_address.yml:27 Tuesday 16 April 2019 19:53:19 +0000 (0:00:00.112) 0:02:28.211 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set_fact rgw_instances] **************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:277 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.121) 0:02:28.333 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=0) => changed=false item: '0' skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=0) => changed=false item: '0' skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=0) => changed=false item: '0' skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=0) => changed=false item: '0' skip_reason: Conditional result was False TASK [ceph-facts : set ntp service name for Debian family] ************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:286 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.122) 0:02:28.456 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-facts : set ntp service name for Red Hat family] *********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:291 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.110) 0:02:28.566 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ntp_service_name: ntpd ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ntp_service_name: ntpd ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ntp_service_name: ntpd ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ntp_service_name: ntpd TASK [ceph-handler : include check_running_containers.yml] 
************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_running_cluster.yml:2 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.130) 0:02:28.697 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : include check_socket_non_container.yml] ********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_running_cluster.yml:7 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.110) 0:02:28.807 ********* included: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-handler : check for a ceph mon socket] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:2 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.212) 0:02:29.019 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check if the ceph mon socket is in-use] ********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:11 Tuesday 16 April 2019 19:53:20 +0000 (0:00:00.113) 0:02:29.132 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : remove ceph mon socket if exists and not used by a process] 
**************************************************************************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:21
Tuesday 16 April 2019  19:53:20 +0000 (0:00:00.118)       0:02:29.250 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : check for a ceph osd socket] *************************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:30
Tuesday 16 April 2019  19:53:21 +0000 (0:00:00.113)       0:02:29.364 *********
ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd: stat --printf=%n /var/run/ceph/ceph-osd*.asok
  delta: '0:00:00.002262'
  end: '2019-04-16 19:53:21.189089'
  failed_when_result: false
  rc: 0
  start: '2019-04-16 19:53:21.186827'
  stderr: ''
  stderr_lines: []
  stdout: /var/run/ceph/ceph-osd.0.asok
  stdout_lines:
ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd: stat --printf=%n /var/run/ceph/ceph-osd*.asok
  delta: '0:00:00.002260'
  end: '2019-04-16 19:53:21.243027'
  failed_when_result: false
  rc: 0
  start: '2019-04-16 19:53:21.240767'
  stderr: ''
  stderr_lines: []
  stdout: /var/run/ceph/ceph-osd.2.asok
  stdout_lines:
ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd: stat --printf=%n /var/run/ceph/ceph-osd*.asok
  delta: '0:00:00.002331'
  end: '2019-04-16 19:53:21.268811'
  failed_when_result: false
  rc: 0
  start: '2019-04-16 19:53:21.266480'
  stderr: ''
  stderr_lines: []
  stdout: /var/run/ceph/ceph-osd.1.asok
  stdout_lines:
ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd: stat --printf=%n /var/run/ceph/ceph-osd*.asok
  delta: '0:00:01.003441'
  end: '2019-04-16 19:53:22.220612'
  failed_when_result: false
  rc: 0
  start: '2019-04-16 19:53:21.217171'
  stderr: ''
  stderr_lines: []
  stdout: /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok
  stdout_lines:

TASK [ceph-handler : check if the ceph osd socket is in-use] **************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:40
Tuesday 16 April 2019  19:53:22 +0000 (0:00:01.214)       0:02:30.579 *********
ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd:
  - fuser
  - --silent
  - /var/run/ceph/ceph-osd.0.asok
  delta: '0:00:00.016889'
  end: '2019-04-16 19:53:22.421886'
  failed_when_result: false
  msg: non-zero return code
  rc: 1
  start: '2019-04-16 19:53:22.404997'
  stderr: ''
  stderr_lines: []
  stdout: ''
  stdout_lines:
ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd:
  - fuser
  - --silent
  - /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok
  delta: '0:00:00.025802'
  end: '2019-04-16 19:53:22.457852'
  failed_when_result: false
  msg: non-zero return code
  rc: 1
  start: '2019-04-16 19:53:22.432050'
  stderr: 'Cannot stat /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok: Not a directory'
  stderr_lines:
  - 'Cannot stat /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok: Not a directory'
  stdout: ''
  stdout_lines:
ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd:
  - fuser
  - --silent
  - /var/run/ceph/ceph-osd.2.asok
  delta: '0:00:00.016646'
  end: '2019-04-16 19:53:22.473966'
  failed_when_result: false
  msg: non-zero return code
  rc: 1
  start: '2019-04-16 19:53:22.457320'
  stderr: ''
  stderr_lines: []
  stdout: ''
  stdout_lines:
ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  cmd:
  - fuser
  - --silent
  - /var/run/ceph/ceph-osd.1.asok
  delta: '0:00:00.016221'
  end: '2019-04-16 19:53:22.501745'
  failed_when_result: false
  msg: non-zero return code
  rc: 1
  start: '2019-04-16 19:53:22.485524'
  stderr: ''
  stderr_lines: []
  stdout: ''
  stdout_lines:

TASK [ceph-handler : remove ceph osd socket if exists and not used by a process] ******************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:50
Tuesday 16 April 2019  19:53:22 +0000 (0:00:00.278)       0:02:30.857 *********
changed: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=true
  path: /var/run/ceph/ceph-osd.0.asok
  state: absent
ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  path: /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok
  state: absent
changed: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true
  path: /var/run/ceph/ceph-osd.2.asok
  state: absent
changed: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true
  path: /var/run/ceph/ceph-osd.1.asok
  state: absent
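Note on the e24-h05 results above: stat --printf=%n prints each matched file name with no separator between matches, so on the one host that still had two OSD admin sockets the two paths came back fused into a single string. The follow-up fuser check then failed with "Cannot stat ...: Not a directory", which yields the same rc=1 as "socket not in use", and the removal task reported ok/changed=false because that fused path does not exist. This suggests the stale sockets on e24-h05 were left in place while the other hosts had theirs removed. A minimal shell sketch of the behaviour (paths are illustrative; this is not the ceph-ansible task itself):

    # %n is emitted once per matched file, with nothing in between:
    stat --printf=%n /var/run/ceph/ceph-osd*.asok
    #   -> /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok
    # fuser then sees one bogus path and fails the same way it does for an unused socket:
    fuser --silent /var/run/ceph/ceph-osd.0.asok/var/run/ceph/ceph-osd.3.asok
    #   -> Cannot stat ...: Not a directory   (exit status 1)
    # emitting one path per line keeps each socket individually checkable:
    stat --printf='%n\n' /var/run/ceph/ceph-osd*.asok | while read -r sock; do
        fuser --silent "$sock" || echo "$sock is not in use"
    done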
TASK [ceph-handler : check for a ceph mds socket] *************************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:59
Tuesday 16 April 2019  19:53:22 +0000 (0:00:00.261)       0:02:31.118 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : check if the ceph mds socket is in-use] **************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:69
Tuesday 16 April 2019  19:53:22 +0000 (0:00:00.115)       0:02:31.233 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : remove ceph mds socket if exists and not used by a process] ******************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:79
Tuesday 16 April 2019  19:53:23 +0000 (0:00:00.121)       0:02:31.354 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : check for a ceph rgw socket] *************************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:88
Tuesday 16 April 2019  19:53:23 +0000 (0:00:00.112)       0:02:31.467 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : check if the ceph rgw socket is in-use] **************************************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:98
Tuesday 16 April 2019  19:53:23 +0000 (0:00:00.113)       0:02:31.581 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-handler : remove ceph rgw socket if exists and not used by a process] ******************
task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:108
Tuesday 16 April 2019  19:53:23 +0000 (0:00:00.113)       0:02:31.694 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason:
Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a ceph mgr socket] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:117 Tuesday 16 April 2019 19:53:23 +0000 (0:00:00.112) 0:02:31.806 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check if the ceph mgr socket is in-use] ********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:127 Tuesday 16 April 2019 19:53:23 +0000 (0:00:00.117) 0:02:31.923 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : remove ceph mgr socket if exists and not used by a process] ************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:137 Tuesday 16 April 2019 19:53:23 +0000 (0:00:00.113) 0:02:32.037 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a ceph rbd mirror socket] ************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:146 Tuesday 16 April 2019 19:53:23 +0000 (0:00:00.112) 0:02:32.149 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false 
skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check if the ceph rbd mirror socket is in-use] *************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:156 Tuesday 16 April 2019 19:53:23 +0000 (0:00:00.113) 0:02:32.263 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : remove ceph rbd mirror socket if exists and not used by a process] ******************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:166 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.123) 0:02:32.386 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a ceph nfs ganesha socket] ************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:175 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.114) 0:02:32.500 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check if the ceph nfs ganesha socket is in-use] ************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:184 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.113) 0:02:32.614 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => 
changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : remove ceph nfs ganesha socket if exists and not used by a process] ****************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:194 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.114) 0:02:32.728 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a tcmu-runner] ************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:203 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.112) 0:02:32.840 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a rbd-target-api] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:212 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.115) 0:02:32.956 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-handler : check for a rbd-target-gw] *********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:221 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.113) 0:02:33.070 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: 
[e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include_tasks installs/install_on_redhat.yml] ***************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:2 Tuesday 16 April 2019 19:53:24 +0000 (0:00:00.113) 0:02:33.184 ********* included: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_on_redhat.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-common : include configure_redhat_repository_installation.yml] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_on_redhat.yml:2 Tuesday 16 April 2019 19:53:25 +0000 (0:00:00.203) 0:02:33.387 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include configure_redhat_local_installation.yml] ************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_on_redhat.yml:7 Tuesday 16 April 2019 19:53:25 +0000 (0:00:00.112) 0:02:33.500 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include install_redhat_packages.yml] ************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_on_redhat.yml:12 Tuesday 16 April 2019 19:53:25 +0000 (0:00:00.112) 0:02:33.613 ********* included: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-common : install redhat dependencies] 
********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml:2 Tuesday 16 April 2019 19:53:25 +0000 (0:00:00.198) 0:02:33.812 ********* ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: python3-pycurl' - 'Installed: python3-setuptools' ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: python3-pycurl' - 'Installed: python3-setuptools' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: python3-pycurl' - 'Installed: python3-setuptools' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: python3-pycurl' - 'Installed: python3-setuptools' TASK [ceph-common : install centos dependencies] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml:11 Tuesday 16 April 2019 19:53:26 +0000 (0:00:01.141) 0:02:34.953 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : install redhat ceph packages] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml:20 Tuesday 16 April 2019 19:53:26 +0000 (0:00:00.115) 0:02:35.069 ********* ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: ceph-common' - 'Installed: ceph-osd' ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: ceph-common' - 'Installed: ceph-osd' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: ceph-common' - 'Installed: ceph-osd' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: ceph-common' - 'Installed: ceph-osd' TASK [ceph-common : include_tasks installs/install_on_suse.yml] ******************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:8 Tuesday 16 April 2019 19:53:27 +0000 (0:00:00.980) 0:02:36.049 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => 
changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include installs/install_on_debian.yml] *********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:14 Tuesday 16 April 2019 19:53:27 +0000 (0:00:00.112) 0:02:36.162 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include_tasks installs/install_on_clear.yml] ****************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:21 Tuesday 16 April 2019 19:53:27 +0000 (0:00:00.117) 0:02:36.280 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : get ceph version] ********************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:27 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.112) 0:02:36.393 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - ceph - --version delta: '0:00:00.076057' end: '2019-04-16 19:53:28.287940' rc: 0 start: '2019-04-16 19:53:28.211883' stderr: '' stderr_lines: [] stdout: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stdout_lines: ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - ceph - --version delta: '0:00:00.076126' end: '2019-04-16 19:53:28.316256' rc: 0 start: '2019-04-16 19:53:28.240130' stderr: '' stderr_lines: [] stdout: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stdout_lines: ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - ceph - --version delta: '0:00:00.076527' end: '2019-04-16 19:53:28.342655' rc: 0 start: '2019-04-16 19:53:28.266128' stderr: '' stderr_lines: [] stdout: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stdout_lines: ok: 
[e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - ceph - --version delta: '0:00:00.076134' end: '2019-04-16 19:53:28.370069' rc: 0 start: '2019-04-16 19:53:28.293935' stderr: '' stderr_lines: [] stdout: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stdout_lines: TASK [ceph-common : set_fact ceph_version] **************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:33 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.331) 0:02:36.724 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_version: 14.2.0-142-g2f9c072 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_version: 14.2.0-142-g2f9c072 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_version: 14.2.0-142-g2f9c072 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_version: 14.2.0-142-g2f9c072 TASK [ceph-common : include release-rhcs.yml] ************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:38 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.128) 0:02:36.853 ********* included: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-common : set_fact ceph_release jewel] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml:2 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.203) 0:02:37.056 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : set_fact ceph_release kraken] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml:8 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.113) 0:02:37.170 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: 
[e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : set_fact ceph_release luminous] ******************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml:14 Tuesday 16 April 2019 19:53:28 +0000 (0:00:00.114) 0:02:37.285 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : set_fact ceph_release mimic] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml:20 Tuesday 16 April 2019 19:53:29 +0000 (0:00:00.113) 0:02:37.399 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : set_fact ceph_release nautilus] ******************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/release-rhcs.yml:26 Tuesday 16 April 2019 19:53:29 +0000 (0:00:00.113) 0:02:37.513 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_release: nautilus ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_release: nautilus ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_release: nautilus ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_release: nautilus TASK [ceph-common : set_fact ceph_release - override ceph_release with ceph_stable_release] *************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:47 Tuesday 16 April 2019 19:53:29 +0000 (0:00:00.128) 0:02:37.641 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was 
False TASK [ceph-common : include create_rbd_client_dir.yml] **************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:56 Tuesday 16 April 2019 19:53:29 +0000 (0:00:00.111) 0:02:37.753 ********* included: /usr/share/ceph-ansible/roles/ceph-common/tasks/create_rbd_client_dir.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-common : create rbd client directory] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/create_rbd_client_dir.yml:2 Tuesday 16 April 2019 19:53:29 +0000 (0:00:00.200) 0:02:37.953 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/run/ceph) => changed=false gid: 167 group: ceph item: /var/run/ceph mode: '0770' owner: ceph path: /var/run/ceph size: 40 state: directory uid: 167 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/run/ceph) => changed=false gid: 167 group: ceph item: /var/run/ceph mode: '0770' owner: ceph path: /var/run/ceph size: 80 state: directory uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/run/ceph) => changed=false gid: 167 group: ceph item: /var/run/ceph mode: '0770' owner: ceph path: /var/run/ceph size: 40 state: directory uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/run/ceph) => changed=false gid: 167 group: ceph item: /var/run/ceph mode: '0770' owner: ceph path: /var/run/ceph size: 40 state: directory uid: 167 ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/log/ceph) => changed=false gid: 167 group: ceph item: /var/log/ceph mode: '0770' owner: ceph path: /var/log/ceph size: 51 state: directory uid: 167 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/log/ceph) => changed=false gid: 167 group: ceph item: /var/log/ceph mode: '0770' owner: ceph path: /var/log/ceph size: 134 state: directory uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/log/ceph) => changed=false gid: 167 group: ceph item: /var/log/ceph mode: '0770' owner: ceph path: /var/log/ceph size: 51 state: directory uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/log/ceph) => changed=false gid: 167 group: ceph item: /var/log/ceph mode: '0770' owner: ceph path: /var/log/ceph size: 51 state: directory uid: 167 TASK [ceph-common : include configure_cluster_name.yml] *************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:59 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.395) 0:02:38.349 ********* included: /usr/share/ceph-ansible/roles/ceph-common/tasks/configure_cluster_name.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-common : configure cluster name] 
*************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/configure_cluster_name.yml:2 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.205) 0:02:38.554 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false backup: '' msg: '' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false backup: '' msg: '' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false backup: '' msg: '' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false backup: '' msg: '' TASK [ceph-common : check /etc/default/ceph exist] ******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/configure_cluster_name.yml:24 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.256) 0:02:38.811 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : when /etc/default/ceph is not dir] **************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/configure_cluster_name.yml:32 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.116) 0:02:38.927 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : when /etc/default/ceph is dir] ******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/configure_cluster_name.yml:42 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.113) 0:02:39.041 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-common : include configure_memory_allocator.yml] 
*********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-common/tasks/main.yml:62 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.113) 0:02:39.155 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : include create_ceph_initial_dirs.yml] ************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:2 Tuesday 16 April 2019 19:53:30 +0000 (0:00:00.117) 0:02:39.273 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : count number of osds for ceph-disk scenarios] ***************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:8 Tuesday 16 April 2019 19:53:31 +0000 (0:00:00.112) 0:02:39.385 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : count number of osds for lvm scenario] ************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:15 Tuesday 16 April 2019 19:53:31 +0000 (0:00:00.117) 0:02:39.503 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : run 'ceph-volume lvm batch --report' to see how many osds are to be created] 
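The next task runs the ceph-volume LVM batch planner in report mode on each OSD host; the Ansible results below record the command as an argv list rather than a single shell line. Purely as an illustration, the sketch below turns that list back into a copy-pasteable command, using the device list from this run (the --report form only prints the plan and makes no changes on the host).

import shlex

# argv list as recorded in the task results below
# (5 NVMe data devices, 2 OSDs per device, BlueStore).
cmd = [
    "ceph-volume", "--cluster", "ceph", "lvm", "batch",
    "--bluestore", "--yes", "--osds-per-device", "2",
    "/dev/nvme0n1", "/dev/nvme1n1", "/dev/nvme2n1", "/dev/nvme3n1", "/dev/nvme4n1",
    "--report", "--format=json",
]
print(" ".join(shlex.quote(a) for a in cmd))
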
********************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:22 Tuesday 16 April 2019 19:53:31 +0000 (0:00:00.114) 0:02:39.618 ********* changed: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 - --report - --format=json delta: '0:00:00.717640' end: '2019-04-16 19:53:32.300224' rc: 0 start: '2019-04-16 19:53:31.582584' stderr: '' stderr_lines: [] stdout: |- { "changed": true, "osds": [ { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } } ], "vgs": [] } stdout_lines: changed: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 - --report - --format=json delta: '0:00:00.718541' end: '2019-04-16 19:53:32.305071' rc: 0 start: '2019-04-16 19:53:31.586530' stderr: '' stderr_lines: [] stdout: |- { "changed": true, "osds": [ { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } 
}, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } } ], "vgs": [] } stdout_lines: changed: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 - --report - --format=json delta: '0:00:00.745442' end: '2019-04-16 19:53:32.328339' rc: 0 start: '2019-04-16 19:53:31.582897' stderr: '' stderr_lines: [] stdout: |- { "changed": true, "osds": [ { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } } ], "vgs": [] } stdout_lines: changed: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 - --report - --format=json delta: '0:00:00.744017' end: '2019-04-16 19:53:32.327865' rc: 0 start: '2019-04-16 19:53:31.583848' stderr: '' stderr_lines: [] stdout: |- { "changed": true, "osds": [ { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme0n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme1n1", "percentage": 50.0, "size": 
399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme2n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme3n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } }, { "block.db": {}, "data": { "human_readable_size": "372.00 GB", "parts": 2, "path": "/dev/nvme4n1", "percentage": 50.0, "size": 399431958528 } } ], "vgs": [] } stdout_lines: TASK [ceph-config : set_fact num_osds from the output of 'ceph-volume lvm batch --report'] **************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:41 Tuesday 16 April 2019 19:53:32 +0000 (0:00:01.077) 0:02:40.695 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: num_osds: '10' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: num_osds: '10' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: num_osds: '10' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: num_osds: '10' TASK [ceph-config : run 'ceph-volume lvm list' to see how many osds have already been created] ************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:49 Tuesday 16 April 2019 19:53:32 +0000 (0:00:00.136) 0:02:40.831 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : set_fact num_osds from the output of 'ceph-volume lvm list'] ************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:62 Tuesday 16 April 2019 19:53:32 +0000 (0:00:00.118) 0:02:40.949 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : create ceph conf directory] 
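The num_osds fact of '10' set just above is simply the length of the "osds" array in each host's --report JSON: five data devices with two OSDs per device. A minimal sketch of that derivation follows, assuming the JSON has been saved to a local file (a hypothetical name; the playbook parses the command's stdout directly instead).

import json

# Hypothetical file holding the stdout of
# `ceph-volume --cluster ceph lvm batch ... --report --format=json`.
with open("lvm_batch_report.json") as fh:
    report = json.load(fh)

num_osds = len(report["osds"])                                  # 10 on these hosts
data_devices = sorted({o["data"]["path"] for o in report["osds"]})
print("planned OSDs:", num_osds)                                # matches the num_osds fact above
print("data devices:", data_devices)                            # the 5 NVMe drives, each carrying 2 OSDs
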
*********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:75 Tuesday 16 April 2019 19:53:32 +0000 (0:00:00.122) 0:02:41.072 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false gid: 167 group: ceph mode: '0755' owner: ceph path: /etc/ceph size: 37 state: directory uid: 167 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false gid: 167 group: ceph mode: '0755' owner: ceph path: /etc/ceph size: 37 state: directory uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false gid: 167 group: ceph mode: '0755' owner: ceph path: /etc/ceph size: 37 state: directory uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false gid: 167 group: ceph mode: '0755' owner: ceph path: /etc/ceph size: 37 state: directory uid: 167 TASK [ceph-config : generate ceph configuration file: ceph.conf] ****************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:83 Tuesday 16 April 2019 19:53:33 +0000 (0:00:00.259) 0:02:41.331 ********* NOTIFIED HANDLER ceph-handler : set _mon_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mon restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mon_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy osd restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mds restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rgw restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : 
restart ceph rgw daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mgr restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called before restart for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rbd mirror restart script for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - non container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - container for e23-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called after restart for e23-h05-740xd.alias.bos.scalelab.redhat.com changed: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=true checksum: bae00895c30418d0bcf6ed312df981b0e13e8775 dest: /etc/ceph/ceph.conf gid: 167 group: ceph md5sum: 42a5c69691a4b88d261abe6580af99b2 mode: '0644' owner: ceph size: 448 src: /root/.ansible/tmp/ansible-tmp-1555444413.0436728-67202066445730/source state: file uid: 167 NOTIFIED HANDLER ceph-handler : set _mon_handler_called before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mon restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mon_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy osd restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mds restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called 
before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rgw restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mgr restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called before restart for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rbd mirror restart script for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - non container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - container for e24-h05-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called after restart for e24-h05-740xd.alias.bos.scalelab.redhat.com changed: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=true checksum: bae00895c30418d0bcf6ed312df981b0e13e8775 dest: /etc/ceph/ceph.conf gid: 167 group: ceph md5sum: 42a5c69691a4b88d261abe6580af99b2 mode: '0644' owner: ceph size: 448 src: /root/.ansible/tmp/ansible-tmp-1555444413.0695848-33670667912494/source state: file uid: 167 NOTIFIED HANDLER ceph-handler : set _mon_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mon restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - non container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mon_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy osd restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - non container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mds restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - non container for 
e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rgw restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - non container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mgr restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - non container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called before restart for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rbd mirror restart script for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - non container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - container for e24-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called after restart for e24-h07-740xd.alias.bos.scalelab.redhat.com changed: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true checksum: bae00895c30418d0bcf6ed312df981b0e13e8775 dest: /etc/ceph/ceph.conf gid: 167 group: ceph md5sum: 42a5c69691a4b88d261abe6580af99b2 mode: '0644' owner: ceph size: 448 src: /root/.ansible/tmp/ansible-tmp-1555444413.0980124-83640594953987/source state: file uid: 167 NOTIFIED HANDLER ceph-handler : set _mon_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mon restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mon daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mon_handler_called after restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy osd restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph osds daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _osd_handler_called after restart for 
e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mds restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mds daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mds_handler_called after restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rgw restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rgw daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rgw_handler_called after restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy mgr restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph mgr daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _mgr_handler_called after restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called before restart for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : copy rbd mirror restart script for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - non container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : restart ceph rbd mirror daemon(s) - container for e23-h07-740xd.alias.bos.scalelab.redhat.com NOTIFIED HANDLER ceph-handler : set _rbdmirror_handler_called after restart for e23-h07-740xd.alias.bos.scalelab.redhat.com changed: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=true checksum: bae00895c30418d0bcf6ed312df981b0e13e8775 dest: /etc/ceph/ceph.conf gid: 167 group: ceph md5sum: 42a5c69691a4b88d261abe6580af99b2 mode: '0644' owner: ceph size: 448 src: /root/.ansible/tmp/ansible-tmp-1555444413.1301386-197002219057290/source state: file uid: 167 TASK [ceph-config : ensure fetch directory exists] ******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:101 Tuesday 16 April 2019 19:53:46 +0000 (0:00:13.931) 0:02:55.263 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : generate ceph.conf configuration file locally] **************************************************************************************************************************************************************************************************** task path: 
/usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:112 Tuesday 16 April 2019 19:53:46 +0000 (0:00:00.042) 0:02:55.306 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : create a local fetch directory if it does not exist] ********************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:129 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.041) 0:02:55.347 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-config : generate ceph.conf configuration file] ************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:142 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.037) 0:02:55.385 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : include_tasks system_tuning.yml] ********************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:2 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.112) 0:02:55.497 ********* included: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-osd : disable osd directory parsing by updatedb] *********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:4 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.203) 0:02:55.700 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : disable osd directory path in updatedb.conf] ********************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:8 Tuesday 16 April 2019 19:53:47 +0000 
(0:00:00.116) 0:02:55.817 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : create tmpfiles.d directory] ************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:18 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.115) 0:02:55.932 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : disable transparent hugepage] ************************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:29 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.114) 0:02:56.046 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : get default vm.min_free_kbytes] ********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:41 Tuesday 16 April 2019 19:53:47 +0000 (0:00:00.114) 0:02:56.161 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - sysctl - -b - vm.min_free_kbytes delta: '0:00:00.002683' end: '2019-04-16 19:53:47.981847' failed_when_result: false rc: 0 start: '2019-04-16 19:53:47.979164' stderr: '' stderr_lines: [] stdout: '4194303' stdout_lines: ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - sysctl - -b - vm.min_free_kbytes delta: '0:00:00.002674' end: '2019-04-16 19:53:48.076535' failed_when_result: false rc: 0 start: '2019-04-16 19:53:48.073861' stderr: '' stderr_lines: [] stdout: '4194303' stdout_lines: ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - sysctl - -b - vm.min_free_kbytes delta: '0:00:00.002634' end: '2019-04-16 19:53:48.112313' failed_when_result: false rc: 0 start: '2019-04-16 19:53:48.109679' stderr: '' stderr_lines: [] stdout: '4194303' stdout_lines: ok: 
[e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false cmd: - sysctl - -b - vm.min_free_kbytes delta: '0:00:00.002646' end: '2019-04-16 19:53:48.136676' failed_when_result: false rc: 0 start: '2019-04-16 19:53:48.134030' stderr: '' stderr_lines: [] stdout: '4194303' stdout_lines: TASK [ceph-osd : set_fact vm_min_free_kbytes] ************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:48 Tuesday 16 April 2019 19:53:48 +0000 (0:00:00.327) 0:02:56.489 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: vm_min_free_kbytes: '4194303' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: vm_min_free_kbytes: '4194303' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: vm_min_free_kbytes: '4194303' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: vm_min_free_kbytes: '4194303' TASK [ceph-osd : apply operating system tuning] *********************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/system_tuning.yml:52 Tuesday 16 April 2019 19:53:48 +0000 (0:00:00.194) 0:02:56.684 ********* ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.aio-max-nr', 'value': '1048576', 'enable': "(osd_objectstore == 'bluestore')"}) => changed=false item: enable: (osd_objectstore == 'bluestore') name: fs.aio-max-nr value: '1048576' ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.aio-max-nr', 'value': '1048576', 'enable': "(osd_objectstore == 'bluestore')"}) => changed=false item: enable: (osd_objectstore == 'bluestore') name: fs.aio-max-nr value: '1048576' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.aio-max-nr', 'value': '1048576', 'enable': "(osd_objectstore == 'bluestore')"}) => changed=false item: enable: (osd_objectstore == 'bluestore') name: fs.aio-max-nr value: '1048576' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.aio-max-nr', 'value': '1048576', 'enable': "(osd_objectstore == 'bluestore')"}) => changed=false item: enable: (osd_objectstore == 'bluestore') name: fs.aio-max-nr value: '1048576' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.file-max', 'value': 26234859}) => changed=false item: name: fs.file-max value: 26234859 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.file-max', 'value': 26234859}) => changed=false item: name: fs.file-max value: 26234859 ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.file-max', 'value': 26234859}) => changed=false item: name: fs.file-max value: 26234859 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'fs.file-max', 'value': 26234859}) => changed=false item: name: fs.file-max value: 26234859 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.zone_reclaim_mode', 'value': 0}) => changed=false item: name: vm.zone_reclaim_mode value: 0 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.zone_reclaim_mode', 'value': 0}) => changed=false item: name: vm.zone_reclaim_mode value: 0 ok: 
[e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.zone_reclaim_mode', 'value': 0}) => changed=false item: name: vm.zone_reclaim_mode value: 0 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.zone_reclaim_mode', 'value': 0}) => changed=false item: name: vm.zone_reclaim_mode value: 0 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.swappiness', 'value': 10}) => changed=false item: name: vm.swappiness value: 10 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.swappiness', 'value': 10}) => changed=false item: name: vm.swappiness value: 10 ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.swappiness', 'value': 10}) => changed=false item: name: vm.swappiness value: 10 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.swappiness', 'value': 10}) => changed=false item: name: vm.swappiness value: 10 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.min_free_kbytes', 'value': '4194303'}) => changed=false item: name: vm.min_free_kbytes value: '4194303' ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.min_free_kbytes', 'value': '4194303'}) => changed=false item: name: vm.min_free_kbytes value: '4194303' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.min_free_kbytes', 'value': '4194303'}) => changed=false item: name: vm.min_free_kbytes value: '4194303' ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': 'vm.min_free_kbytes', 'value': '4194303'}) => changed=false item: name: vm.min_free_kbytes value: '4194303' TASK [ceph-osd : install dependencies] ******************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:5 Tuesday 16 April 2019 19:53:49 +0000 (0:00:00.892) 0:02:57.577 ********* ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: parted' ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: parted' ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: parted' ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false attempts: 1 msg: Nothing to do rc: 0 results: - 'Installed: parted' TASK [ceph-osd : install numactl when needed] ************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:15 Tuesday 16 April 2019 19:53:50 +0000 (0:00:01.003) 0:02:58.581 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : include_tasks common.yml] 
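The "apply operating system tuning" task above sets a fixed group of sysctls on every OSD host, with vm.min_free_kbytes taken from the value read two tasks earlier (4194303). For a quick read-only recheck outside Ansible, here is a small Python sketch; the keys and expected values are copied from the task output in this run and would need adjusting for a run with different settings.

from pathlib import Path

# OSD host tuning as applied by the "apply operating system tuning" task above.
EXPECTED = {
    "fs/aio-max-nr": "1048576",
    "fs/file-max": "26234859",
    "vm/zone_reclaim_mode": "0",
    "vm/swappiness": "10",
    "vm/min_free_kbytes": "4194303",
}

for key, want in EXPECTED.items():
    got = Path("/proc/sys", key).read_text().strip()
    status = "ok" if got == want else f"MISMATCH (got {got})"
    print(f"{key.replace('/', '.')}: {status}")
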
**************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:26 Tuesday 16 April 2019 19:53:50 +0000 (0:00:00.113) 0:02:58.694 ********* included: /usr/share/ceph-ansible/roles/ceph-osd/tasks/common.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-osd : create bootstrap-osd and osd directories] ************************************************************************************************************************************************************************************************************ task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/common.yml:2 Tuesday 16 April 2019 19:53:50 +0000 (0:00:00.204) 0:02:58.899 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/bootstrap-osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/bootstrap-osd/ mode: '0755' owner: ceph path: /var/lib/ceph/bootstrap-osd/ size: 26 state: directory uid: 167 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/bootstrap-osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/bootstrap-osd/ mode: '0755' owner: ceph path: /var/lib/ceph/bootstrap-osd/ size: 26 state: directory uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/bootstrap-osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/bootstrap-osd/ mode: '0755' owner: ceph path: /var/lib/ceph/bootstrap-osd/ size: 26 state: directory uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/bootstrap-osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/bootstrap-osd/ mode: '0755' owner: ceph path: /var/lib/ceph/bootstrap-osd/ size: 26 state: directory uid: 167 ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/osd/ mode: '0755' owner: ceph path: /var/lib/ceph/osd/ size: 20 state: directory uid: 167 ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/osd/ mode: '0755' owner: ceph path: /var/lib/ceph/osd/ size: 34 state: directory uid: 167 ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/osd/ mode: '0755' owner: ceph path: /var/lib/ceph/osd/ size: 20 state: directory uid: 167 ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/var/lib/ceph/osd/) => changed=false gid: 167 group: ceph item: /var/lib/ceph/osd/ mode: '0755' owner: ceph path: /var/lib/ceph/osd/ size: 20 state: directory uid: 167 TASK [ceph-osd : copy ceph key(s) if needed] ************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/common.yml:15 Tuesday 16 April 2019 19:53:51 +0000 (0:00:00.455) 0:02:59.354 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/var/lib/ceph/bootstrap-osd/ceph.keyring', 'copy_key': True}) => changed=false checksum: 
04e93405d2f786b60b840132947e01f87bb08520 dest: /var/lib/ceph/bootstrap-osd/ceph.keyring gid: 167 group: ceph item: copy_key: true name: /var/lib/ceph/bootstrap-osd/ceph.keyring mode: '0600' owner: ceph path: /var/lib/ceph/bootstrap-osd/ceph.keyring size: 113 state: file uid: 167 skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/etc/ceph/ceph.client.admin.keyring', 'copy_key': False}) => changed=false item: copy_key: false name: /etc/ceph/ceph.client.admin.keyring skip_reason: Conditional result was False ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/var/lib/ceph/bootstrap-osd/ceph.keyring', 'copy_key': True}) => changed=false checksum: 04e93405d2f786b60b840132947e01f87bb08520 dest: /var/lib/ceph/bootstrap-osd/ceph.keyring gid: 167 group: ceph item: copy_key: true name: /var/lib/ceph/bootstrap-osd/ceph.keyring mode: '0600' owner: ceph path: /var/lib/ceph/bootstrap-osd/ceph.keyring size: 113 state: file uid: 167 skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/etc/ceph/ceph.client.admin.keyring', 'copy_key': False}) => changed=false item: copy_key: false name: /etc/ceph/ceph.client.admin.keyring skip_reason: Conditional result was False ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/var/lib/ceph/bootstrap-osd/ceph.keyring', 'copy_key': True}) => changed=false checksum: 04e93405d2f786b60b840132947e01f87bb08520 dest: /var/lib/ceph/bootstrap-osd/ceph.keyring gid: 167 group: ceph item: copy_key: true name: /var/lib/ceph/bootstrap-osd/ceph.keyring mode: '0600' owner: ceph path: /var/lib/ceph/bootstrap-osd/ceph.keyring size: 113 state: file uid: 167 skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/etc/ceph/ceph.client.admin.keyring', 'copy_key': False}) => changed=false item: copy_key: false name: /etc/ceph/ceph.client.admin.keyring skip_reason: Conditional result was False ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/var/lib/ceph/bootstrap-osd/ceph.keyring', 'copy_key': True}) => changed=false checksum: 04e93405d2f786b60b840132947e01f87bb08520 dest: /var/lib/ceph/bootstrap-osd/ceph.keyring gid: 167 group: ceph item: copy_key: true name: /var/lib/ceph/bootstrap-osd/ceph.keyring mode: '0600' owner: ceph path: /var/lib/ceph/bootstrap-osd/ceph.keyring size: 113 state: file uid: 167 skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item={'name': '/etc/ceph/ceph.client.admin.keyring', 'copy_key': False}) => changed=false item: copy_key: false name: /etc/ceph/ceph.client.admin.keyring skip_reason: Conditional result was False TASK [ceph-osd : include ceph_disk_cli_options_facts.yml] ************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:29 Tuesday 16 April 2019 19:53:51 +0000 (0:00:00.528) 0:02:59.882 ********* included: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-osd : set_fact ceph_disk_cli_options '--cluster ceph --bluestore'] 
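The keyring copy above reports the same checksum (04e93405d2f786b60b840132947e01f87bb08520) on all four OSD hosts; as far as I know, the `checksum` field in these Ansible results is a SHA-1 hex digest. The short sketch below recomputes that digest locally so it can be compared host to host.

import hashlib
from pathlib import Path

# Recompute the digest reported by the "copy ceph key(s) if needed" task above
# (assuming Ansible's `checksum` field is a SHA-1 hex digest of the file).
keyring = Path("/var/lib/ceph/bootstrap-osd/ceph.keyring")
digest = hashlib.sha1(keyring.read_bytes()).hexdigest()
print(digest)  # 04e93405d2f786b60b840132947e01f87bb08520 in this run
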
***************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:2 Tuesday 16 April 2019 19:53:51 +0000 (0:00:00.213) 0:03:00.095 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_disk_cli_options: --cluster ceph --bluestore ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_disk_cli_options: --cluster ceph --bluestore ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_disk_cli_options: --cluster ceph --bluestore ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false ansible_facts: ceph_disk_cli_options: --cluster ceph --bluestore TASK [ceph-osd : set_fact ceph_disk_cli_options 'ceph_disk_cli_options'] ********************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:10 Tuesday 16 April 2019 19:53:51 +0000 (0:00:00.134) 0:03:00.230 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : set_fact ceph_disk_cli_options '--cluster ceph --bluestore --dmcrypt'] ******************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:18 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.116) 0:03:00.346 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : set_fact ceph_disk_cli_options '--cluster ceph --filestore --dmcrypt'] ******************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:26 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.120) 0:03:00.467 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK 
[ceph-osd : set_fact docker_env_args '-e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=0'] ******************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:34 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.115) 0:03:00.582 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : set_fact docker_env_args '-e OSD_BLUESTORE=0 -e OSD_FILESTORE=1 -e OSD_DMCRYPT=1'] ******************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:42 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.111) 0:03:00.694 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : set_fact docker_env_args '-e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=0'] ******************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:50 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.111) 0:03:00.805 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : set_fact docker_env_args '-e OSD_BLUESTORE=1 -e OSD_FILESTORE=0 -e OSD_DMCRYPT=1'] ******************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/ceph_disk_cli_options_facts.yml:58 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.116) 0:03:00.921 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK 
[ceph-osd : include build_devices.yml] *************************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:32 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.112) 0:03:01.034 ********* included: /usr/share/ceph-ansible/roles/ceph-osd/tasks/build_devices.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com TASK [ceph-osd : resolve dedicated device link(s)] ******************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/build_devices.yml:2 Tuesday 16 April 2019 19:53:52 +0000 (0:00:00.213) 0:03:01.248 ********* TASK [ceph-osd : set_fact build dedicated_devices from resolved symlinks] ********************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/build_devices.yml:11 Tuesday 16 April 2019 19:53:53 +0000 (0:00:00.111) 0:03:01.359 ********* TASK [ceph-osd : set_fact build final dedicated_devices list] ********************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/build_devices.yml:19 Tuesday 16 April 2019 19:53:53 +0000 (0:00:00.111) 0:03:01.471 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : read information about the devices] ****************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:35 Tuesday 16 April 2019 19:53:53 +0000 (0:00:00.114) 0:03:01.586 ********* ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false disk: dev: /dev/nvme0n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme0n1 partitions: [] script: unit 'MiB' print ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false disk: dev: /dev/nvme0n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme0n1 partitions: [] script: unit 'MiB' print ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false disk: dev: /dev/nvme0n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme0n1 partitions: [] 
script: unit 'MiB' print ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme0n1) => changed=false disk: dev: /dev/nvme0n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme0n1 partitions: [] script: unit 'MiB' print ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false disk: dev: /dev/nvme1n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme1n1 partitions: [] script: unit 'MiB' print ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false disk: dev: /dev/nvme1n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme1n1 partitions: [] script: unit 'MiB' print ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false disk: dev: /dev/nvme1n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme1n1 partitions: [] script: unit 'MiB' print ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme1n1) => changed=false disk: dev: /dev/nvme1n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme1n1 partitions: [] script: unit 'MiB' print ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false disk: dev: /dev/nvme2n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme2n1 partitions: [] script: unit 'MiB' print ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false disk: dev: /dev/nvme2n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme2n1 partitions: [] script: unit 'MiB' print ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false disk: dev: /dev/nvme2n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme2n1 partitions: [] script: unit 'MiB' print ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme2n1) => changed=false disk: dev: /dev/nvme2n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme2n1 partitions: [] script: unit 'MiB' print ok: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false disk: dev: /dev/nvme3n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme3n1 partitions: [] script: unit 'MiB' print ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false disk: dev: /dev/nvme3n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme3n1 partitions: [] script: unit 'MiB' print ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false disk: dev: /dev/nvme3n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme3n1 partitions: [] script: unit 'MiB' print ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme3n1) => changed=false disk: dev: /dev/nvme3n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme3n1 partitions: [] script: unit 'MiB' print ok: 
[e23-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false disk: dev: /dev/nvme4n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme4n1 partitions: [] script: unit 'MiB' print ok: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false disk: dev: /dev/nvme4n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme4n1 partitions: [] script: unit 'MiB' print ok: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false disk: dev: /dev/nvme4n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme4n1 partitions: [] script: unit 'MiB' print ok: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => (item=/dev/nvme4n1) => changed=false disk: dev: /dev/nvme4n1 logical_block: 512 model: NVMe Device physical_block: 512 size: 763098.0 table: unknown unit: mib item: /dev/nvme4n1 partitions: [] script: unit 'MiB' print TASK [ceph-osd : include check_gpt.yml] ******************************************************************************************************************************************************************************************************************************* task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:42 Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.909) 0:03:02.495 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : include_tasks scenarios/collocated.yml] ************************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:47 Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.115) 0:03:02.611 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False TASK [ceph-osd : include_tasks scenarios/non-collocated.yml] ********************************************************************************************************************************************************************************************************** task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:52 Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.122) 0:03:02.733 ********* skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was False skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false skip_reason: Conditional result was 
False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-osd : include_tasks scenarios/lvm.yml] *********************************************************************************************************************************************************************************************************************
task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:58
Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.115) 0:03:02.849 *********
skipping: [e23-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h05-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e24-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False
skipping: [e23-h07-740xd.alias.bos.scalelab.redhat.com] => changed=false
  skip_reason: Conditional result was False

TASK [ceph-osd : include_tasks scenarios/lvm-batch.yml] ***************************************************************************************************************************************************************************************************************
task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:66
Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.116) 0:03:02.966 *********
included: /usr/share/ceph-ansible/roles/ceph-osd/tasks/scenarios/lvm-batch.yml for e23-h05-740xd.alias.bos.scalelab.redhat.com, e24-h05-740xd.alias.bos.scalelab.redhat.com, e24-h07-740xd.alias.bos.scalelab.redhat.com, e23-h07-740xd.alias.bos.scalelab.redhat.com

TASK [ceph-osd : use ceph-volume lvm batch to create bluestore osds] **************************************************************************************************************************************************************************************************
task path: /usr/share/ceph-ansible/roles/ceph-osd/tasks/scenarios/lvm-batch.yml:3
Tuesday 16 April 2019 19:53:54 +0000 (0:00:00.225) 0:03:03.192 *********
fatal: [e23-h05-740xd.alias.bos.scalelab.redhat.com]: FAILED!
=> changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 delta: '0:00:04.637272' end: '2019-04-16 19:53:59.656130' msg: non-zero return code rc: 1 start: '2019-04-16 19:53:55.018858' stderr: |- Traceback (most recent call last): File "/sbin/ceph-volume", line 11, in load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')() File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__ self.main(self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc return f(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main terminal.dispatch(self.mapper, subcommand_args) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main terminal.dispatch(self.mapper, self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main self.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute self.strategy.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute Create(command).main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main self.create(args) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create prepare_step.safe_prepare(args) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare self.prepare() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare osd_fsid, File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore db=db File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command))) RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid db915259-57f0-4d4d-873c-743d77a6e56d --setuser ceph --setgroup ceph stderr_lines: - 'Traceback (most recent call last):' - ' File "/sbin/ceph-volume", line 11, in ' - ' load_entry_point(''ceph-volume==1.0.0'', ''console_scripts'', ''ceph-volume'')()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__' - ' self.main(self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc' - ' return f(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main' - ' terminal.dispatch(self.mapper, subcommand_args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' 
instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main' - ' terminal.dispatch(self.mapper, self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main' - ' self.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute' - ' self.strategy.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute' - ' Create(command).main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main' - ' self.create(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create' - ' prepare_step.safe_prepare(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare' - ' self.prepare()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare' - ' osd_fsid,' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore' - ' db=db' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore' - ' raise RuntimeError(''Command failed with exit code %s: %s'' % (returncode, '' ''.join(command)))' - 'RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid db915259-57f0-4d4d-873c-743d77a6e56d --setuser ceph --setgroup ceph' stdout: |- Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-5e078287-f295-4507-a70f-4beaccfaee22 /dev/nvme0n1 stdout: Physical volume "/dev/nvme0n1" successfully created. stdout: Volume group "ceph-5e078287-f295-4507-a70f-4beaccfaee22" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-bbdc8615-3a07-4d86-a554-feedd7b0078e /dev/nvme1n1 stdout: Physical volume "/dev/nvme1n1" successfully created. stdout: Volume group "ceph-bbdc8615-3a07-4d86-a554-feedd7b0078e" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-a865f642-a3f2-4742-a5c4-f6fda405decb /dev/nvme2n1 stdout: Physical volume "/dev/nvme2n1" successfully created. stdout: Volume group "ceph-a865f642-a3f2-4742-a5c4-f6fda405decb" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-8d1a7ec3-38db-4b5d-a351-8412312faced /dev/nvme3n1 stdout: Physical volume "/dev/nvme3n1" successfully created. stdout: Volume group "ceph-8d1a7ec3-38db-4b5d-a351-8412312faced" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-154a4edb-cd6e-4ceb-8d0f-25364f9542cd /dev/nvme4n1 stdout: Physical volume "/dev/nvme4n1" successfully created. 
stdout: Volume group "ceph-154a4edb-cd6e-4ceb-8d0f-25364f9542cd" successfully created Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-f8f987dc-7d05-477d-a703-21ccb059b1ee ceph-5e078287-f295-4507-a70f-4beaccfaee22 stdout: Logical volume "osd-data-f8f987dc-7d05-477d-a703-21ccb059b1ee" created. Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-18a8f331-22e1-4a96-9f43-65048a78fd79 ceph-5e078287-f295-4507-a70f-4beaccfaee22 stdout: Logical volume "osd-data-18a8f331-22e1-4a96-9f43-65048a78fd79" created. Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new db915259-57f0-4d4d-873c-743d77a6e56d Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0 Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-0 Running command: /bin/chown -h ceph:ceph /dev/ceph-5e078287-f295-4507-a70f-4beaccfaee22/osd-data-f8f987dc-7d05-477d-a703-21ccb059b1ee Running command: /bin/chown -R ceph:ceph /dev/dm-3 Running command: /bin/ln -s /dev/ceph-5e078287-f295-4507-a70f-4beaccfaee22/osd-data-f8f987dc-7d05-477d-a703-21ccb059b1ee /var/lib/ceph/osd/ceph-0/block Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap stderr: got monmap epoch 1 Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQDVMrZcHEdsFhAAe59fPUunINGCTeXFfptAsQ== stdout: creating /var/lib/ceph/osd/ceph-0/keyring added entity osd.0 auth(key=AQDVMrZcHEdsFhAAe59fPUunINGCTeXFfptAsQ==) Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/ Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid db915259-57f0-4d4d-873c-743d77a6e56d --setuser ceph --setgroup ceph stdout: /usr/include/c++/8/bits/stl_vector.h:932: std::vector<_Tp, _Alloc>::reference std::vector<_Tp, _Alloc>::operator[](std::vector<_Tp, _Alloc>::size_type) [with _Tp = long unsigned int; _Alloc = mempool::pool_allocator<(mempool::pool_index_t)1, long unsigned int>; std::vector<_Tp, _Alloc>::reference = long unsigned int&; std::vector<_Tp, _Alloc>::size_type = long unsigned int]: Assertion '__builtin_expect(__n < this->size(), true)' failed. 
stderr: 2019-04-16 19:53:58.548 7f87c06e5080 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
stderr: *** Caught signal (Aborted) **
stderr: in thread 7f87c06e5080 thread_name:ceph-osd
stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
stderr: 1: (()+0x12d80) [0x7f87bd276d80]
stderr: 2: (gsignal()+0x10f) [0x7f87bbf5193f]
stderr: 3: (abort()+0x127) [0x7f87bbf3bc95]
stderr: 4: (()+0x65ca48) [0x55f8a5c35a48]
stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x55f8a6233a87]
stderr: 6: (BlueStore::_open_alloc()+0x193) [0x55f8a60ddae3]
stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x55f8a60ff5b6]
stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x55f8a61329d7]
stderr: 9: (BlueStore::mkfs()+0x141f) [0x55f8a614264f]
stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x55f8a5c55d7e]
stderr: 11: (main()+0x1bd1) [0x55f8a5b4e0c1]
stderr: 12: (__libc_start_main()+0xf3) [0x7f87bbf3d813]
stderr: 13: (_start()+0x2e) [0x55f8a5c342fe]
stderr: 2019-04-16 19:53:59.070 7f87c06e5080 -1 *** Caught signal (Aborted) **
stderr: in thread 7f87c06e5080 thread_name:ceph-osd
stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
stderr: 1: (()+0x12d80) [0x7f87bd276d80]
stderr: 2: (gsignal()+0x10f) [0x7f87bbf5193f]
stderr: 3: (abort()+0x127) [0x7f87bbf3bc95]
stderr: 4: (()+0x65ca48) [0x55f8a5c35a48]
stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x55f8a6233a87]
stderr: 6: (BlueStore::_open_alloc()+0x193) [0x55f8a60ddae3]
stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x55f8a60ff5b6]
stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x55f8a61329d7]
stderr: 9: (BlueStore::mkfs()+0x141f) [0x55f8a614264f]
stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x55f8a5c55d7e]
stderr: 11: (main()+0x1bd1) [0x55f8a5b4e0c1]
stderr: 12: (__libc_start_main()+0xf3) [0x7f87bbf3d813]
stderr: 13: (_start()+0x2e) [0x55f8a5c342fe]
stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this.
stderr: -387> 2019-04-16 19:53:58.548 7f87c06e5080 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
stderr: 0> 2019-04-16 19:53:59.070 7f87c06e5080 -1 *** Caught signal (Aborted) **
stderr: in thread 7f87c06e5080 thread_name:ceph-osd
stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
stderr: 1: (()+0x12d80) [0x7f87bd276d80]
stderr: 2: (gsignal()+0x10f) [0x7f87bbf5193f]
stderr: 3: (abort()+0x127) [0x7f87bbf3bc95]
stderr: 4: (()+0x65ca48) [0x55f8a5c35a48]
stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x55f8a6233a87]
stderr: 6: (BlueStore::_open_alloc()+0x193) [0x55f8a60ddae3]
stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x55f8a60ff5b6]
stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x55f8a61329d7]
stderr: 9: (BlueStore::mkfs()+0x141f) [0x55f8a614264f]
stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x55f8a5c55d7e]
stderr: 11: (main()+0x1bd1) [0x55f8a5b4e0c1]
stderr: 12: (__libc_start_main()+0xf3) [0x7f87bbf3d813]
stderr: 13: (_start()+0x2e) [0x55f8a5c342fe]
stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this.
stderr: -387> 2019-04-16 19:53:58.548 7f87c06e5080 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid stderr: 0> 2019-04-16 19:53:59.070 7f87c06e5080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f87c06e5080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f87bd276d80] stderr: 2: (gsignal()+0x10f) [0x7f87bbf5193f] stderr: 3: (abort()+0x127) [0x7f87bbf3bc95] stderr: 4: (()+0x65ca48) [0x55f8a5c35a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x55f8a6233a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x55f8a60ddae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x55f8a60ff5b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x55f8a61329d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x55f8a614264f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x55f8a5c55d7e] stderr: 11: (main()+0x1bd1) [0x55f8a5b4e0c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f87bbf3d813] stderr: 13: (_start()+0x2e) [0x55f8a5c342fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. --> Was unable to complete a new OSD, will rollback changes Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.0 --yes-i-really-mean-it stderr: purged osd.0 stdout_lines: fatal: [e24-h05-740xd.alias.bos.scalelab.redhat.com]: FAILED! => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 delta: '0:00:04.676442' end: '2019-04-16 19:53:59.726593' msg: non-zero return code rc: 1 start: '2019-04-16 19:53:55.050151' stderr: |- Traceback (most recent call last): File "/sbin/ceph-volume", line 11, in load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')() File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__ self.main(self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc return f(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main terminal.dispatch(self.mapper, subcommand_args) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main terminal.dispatch(self.mapper, self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main self.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute self.strategy.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute Create(command).main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main self.create(args) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create prepare_step.safe_prepare(args) File 
"/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare self.prepare() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare osd_fsid, File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore db=db File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command))) RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 8bb6396f-9811-493b-b3ff-2d70a2fbed46 --setuser ceph --setgroup ceph stderr_lines: - 'Traceback (most recent call last):' - ' File "/sbin/ceph-volume", line 11, in ' - ' load_entry_point(''ceph-volume==1.0.0'', ''console_scripts'', ''ceph-volume'')()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__' - ' self.main(self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc' - ' return f(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main' - ' terminal.dispatch(self.mapper, subcommand_args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main' - ' terminal.dispatch(self.mapper, self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main' - ' self.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute' - ' self.strategy.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute' - ' Create(command).main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main' - ' self.create(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create' - ' prepare_step.safe_prepare(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare' - ' self.prepare()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare' - ' osd_fsid,' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore' - ' db=db' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore' - ' raise RuntimeError(''Command failed with exit code %s: %s'' % (returncode, '' ''.join(command)))' - 'RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap 
/var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 8bb6396f-9811-493b-b3ff-2d70a2fbed46 --setuser ceph --setgroup ceph' stdout: |- Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77 /dev/nvme0n1 stdout: Physical volume "/dev/nvme0n1" successfully created. stdout: Volume group "ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-e52a096d-775e-4c16-b0ca-444bc0fea87e /dev/nvme1n1 stdout: Physical volume "/dev/nvme1n1" successfully created. stdout: Volume group "ceph-e52a096d-775e-4c16-b0ca-444bc0fea87e" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-5f35ea12-30fd-49a8-b9d1-96bef2c3a2ed /dev/nvme2n1 stdout: Physical volume "/dev/nvme2n1" successfully created. stdout: Volume group "ceph-5f35ea12-30fd-49a8-b9d1-96bef2c3a2ed" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-a359f956-4a34-4e18-8c77-5660dd9ed2ea /dev/nvme3n1 stdout: Physical volume "/dev/nvme3n1" successfully created. stdout: Volume group "ceph-a359f956-4a34-4e18-8c77-5660dd9ed2ea" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-68a0ec96-7ed4-44db-95fc-80d96b3613fc /dev/nvme4n1 stdout: Physical volume "/dev/nvme4n1" successfully created. stdout: Volume group "ceph-68a0ec96-7ed4-44db-95fc-80d96b3613fc" successfully created Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-abf7dd40-c36d-4bfd-b7ee-828531c5ab4c ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77 stdout: Logical volume "osd-data-abf7dd40-c36d-4bfd-b7ee-828531c5ab4c" created. Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-88eb326b-bffc-4568-a5a7-2af4c5bd051c ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77 stdout: Logical volume "osd-data-88eb326b-bffc-4568-a5a7-2af4c5bd051c" created. 
Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 8bb6396f-9811-493b-b3ff-2d70a2fbed46 Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-1 Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-1 Running command: /bin/chown -h ceph:ceph /dev/ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77/osd-data-abf7dd40-c36d-4bfd-b7ee-828531c5ab4c Running command: /bin/chown -R ceph:ceph /dev/dm-3 Running command: /bin/ln -s /dev/ceph-5326ce2a-9d43-48d3-a9e0-a91aa5222c77/osd-data-abf7dd40-c36d-4bfd-b7ee-828531c5ab4c /var/lib/ceph/osd/ceph-1/block Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-1/activate.monmap stderr: got monmap epoch 1 Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-1/keyring --create-keyring --name osd.1 --add-key AQDVMrZcXezSGRAAqtHgaG1BnJabq9LiMOGiMw== stdout: creating /var/lib/ceph/osd/ceph-1/keyring added entity osd.1 auth(key=AQDVMrZcXezSGRAAqtHgaG1BnJabq9LiMOGiMw==) Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/keyring Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-1/ Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 1 --monmap /var/lib/ceph/osd/ceph-1/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-1/ --osd-uuid 8bb6396f-9811-493b-b3ff-2d70a2fbed46 --setuser ceph --setgroup ceph stdout: /usr/include/c++/8/bits/stl_vector.h:932: std::vector<_Tp, _Alloc>::reference std::vector<_Tp, _Alloc>::operator[](std::vector<_Tp, _Alloc>::size_type) [with _Tp = long unsigned int; _Alloc = mempool::pool_allocator<(mempool::pool_index_t)1, long unsigned int>; std::vector<_Tp, _Alloc>::reference = long unsigned int&; std::vector<_Tp, _Alloc>::size_type = long unsigned int]: Assertion '__builtin_expect(__n < this->size(), true)' failed. 
stderr: 2019-04-16 19:53:58.619 7f5f5a90c080 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid stderr: *** Caught signal (Aborted) ** stderr: in thread 7f5f5a90c080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f5f5749ed80] stderr: 2: (gsignal()+0x10f) [0x7f5f5617993f] stderr: 3: (abort()+0x127) [0x7f5f56163c95] stderr: 4: (()+0x65ca48) [0x560626a17a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x560627015a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x560626ebfae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x560626ee15b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x560626f149d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x560626f2464f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x560626a37d7e] stderr: 11: (main()+0x1bd1) [0x5606269300c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f5f56165813] stderr: 13: (_start()+0x2e) [0x560626a162fe] stderr: 2019-04-16 19:53:59.141 7f5f5a90c080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f5f5a90c080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f5f5749ed80] stderr: 2: (gsignal()+0x10f) [0x7f5f5617993f] stderr: 3: (abort()+0x127) [0x7f5f56163c95] stderr: 4: (()+0x65ca48) [0x560626a17a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x560627015a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x560626ebfae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x560626ee15b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x560626f149d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x560626f2464f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x560626a37d7e] stderr: 11: (main()+0x1bd1) [0x5606269300c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f5f56165813] stderr: 13: (_start()+0x2e) [0x560626a162fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. stderr: -387> 2019-04-16 19:53:58.619 7f5f5a90c080 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid stderr: 0> 2019-04-16 19:53:59.141 7f5f5a90c080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f5f5a90c080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f5f5749ed80] stderr: 2: (gsignal()+0x10f) [0x7f5f5617993f] stderr: 3: (abort()+0x127) [0x7f5f56163c95] stderr: 4: (()+0x65ca48) [0x560626a17a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x560627015a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x560626ebfae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x560626ee15b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x560626f149d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x560626f2464f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x560626a37d7e] stderr: 11: (main()+0x1bd1) [0x5606269300c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f5f56165813] stderr: 13: (_start()+0x2e) [0x560626a162fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. 
stderr: -387> 2019-04-16 19:53:58.619 7f5f5a90c080 -1 bluestore(/var/lib/ceph/osd/ceph-1/) _read_fsid unparsable uuid stderr: 0> 2019-04-16 19:53:59.141 7f5f5a90c080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f5f5a90c080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f5f5749ed80] stderr: 2: (gsignal()+0x10f) [0x7f5f5617993f] stderr: 3: (abort()+0x127) [0x7f5f56163c95] stderr: 4: (()+0x65ca48) [0x560626a17a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x560627015a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x560626ebfae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x560626ee15b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x560626f149d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x560626f2464f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x560626a37d7e] stderr: 11: (main()+0x1bd1) [0x5606269300c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f5f56165813] stderr: 13: (_start()+0x2e) [0x560626a162fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. --> Was unable to complete a new OSD, will rollback changes Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.1 --yes-i-really-mean-it stderr: purged osd.1 stdout_lines: fatal: [e23-h07-740xd.alias.bos.scalelab.redhat.com]: FAILED! => changed=true cmd: - ceph-volume - --cluster - ceph - lvm - batch - --bluestore - --yes - --osds-per-device - '2' - /dev/nvme0n1 - /dev/nvme1n1 - /dev/nvme2n1 - /dev/nvme3n1 - /dev/nvme4n1 delta: '0:00:04.703394' end: '2019-04-16 19:53:59.805745' msg: non-zero return code rc: 1 start: '2019-04-16 19:53:55.102351' stderr: |- Traceback (most recent call last): File "/sbin/ceph-volume", line 11, in load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')() File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__ self.main(self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc return f(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main terminal.dispatch(self.mapper, subcommand_args) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main terminal.dispatch(self.mapper, self.argv) File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch instance.main() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main self.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute self.strategy.execute() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute Create(command).main() File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main self.create(args) File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create prepare_step.safe_prepare(args) File 
"/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare self.prepare() File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root return func(*a, **kw) File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare osd_fsid, File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore db=db File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command))) RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid beb75dcc-5613-4581-bca0-9c0bd26fd59d --setuser ceph --setgroup ceph stderr_lines: - 'Traceback (most recent call last):' - ' File "/sbin/ceph-volume", line 11, in ' - ' load_entry_point(''ceph-volume==1.0.0'', ''console_scripts'', ''ceph-volume'')()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__' - ' self.main(self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc' - ' return f(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main' - ' terminal.dispatch(self.mapper, subcommand_args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main' - ' terminal.dispatch(self.mapper, self.argv)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch' - ' instance.main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main' - ' self.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute' - ' self.strategy.execute()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute' - ' Create(command).main()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main' - ' self.create(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create' - ' prepare_step.safe_prepare(args)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare' - ' self.prepare()' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root' - ' return func(*a, **kw)' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare' - ' osd_fsid,' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore' - ' db=db' - ' File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore' - ' raise RuntimeError(''Command failed with exit code %s: %s'' % (returncode, '' ''.join(command)))' - 'RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap 
/var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid beb75dcc-5613-4581-bca0-9c0bd26fd59d --setuser ceph --setgroup ceph' stdout: |- Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65 /dev/nvme0n1 stdout: Physical volume "/dev/nvme0n1" successfully created. stdout: Volume group "ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-954e11d7-820a-4ff6-bede-eac519568870 /dev/nvme1n1 stdout: Physical volume "/dev/nvme1n1" successfully created. stdout: Volume group "ceph-954e11d7-820a-4ff6-bede-eac519568870" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-a9f285a3-0f77-43d7-bd25-8baea4f3c8d6 /dev/nvme2n1 stdout: Physical volume "/dev/nvme2n1" successfully created. stdout: Volume group "ceph-a9f285a3-0f77-43d7-bd25-8baea4f3c8d6" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-174e5139-d94c-4ff6-9684-6d7b86c0a6b9 /dev/nvme3n1 stdout: Physical volume "/dev/nvme3n1" successfully created. stdout: Volume group "ceph-174e5139-d94c-4ff6-9684-6d7b86c0a6b9" successfully created Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-c0125fa1-4446-411b-b44d-a21993f28690 /dev/nvme4n1 stdout: Physical volume "/dev/nvme4n1" successfully created. stdout: Volume group "ceph-c0125fa1-4446-411b-b44d-a21993f28690" successfully created Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-5c16eca5-43bb-4fe6-93f3-30204b66cbfa ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65 stdout: Logical volume "osd-data-5c16eca5-43bb-4fe6-93f3-30204b66cbfa" created. Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-c9c005f2-ee5d-4946-888d-0b05445b5aaf ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65 stdout: Logical volume "osd-data-c9c005f2-ee5d-4946-888d-0b05445b5aaf" created. 
Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new beb75dcc-5613-4581-bca0-9c0bd26fd59d Running command: /bin/ceph-authtool --gen-print-key Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-3 Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-3 Running command: /bin/chown -h ceph:ceph /dev/ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65/osd-data-5c16eca5-43bb-4fe6-93f3-30204b66cbfa Running command: /bin/chown -R ceph:ceph /dev/dm-3 Running command: /bin/ln -s /dev/ceph-803e7d9c-5b06-4d33-8f3a-82421ada9b65/osd-data-5c16eca5-43bb-4fe6-93f3-30204b66cbfa /var/lib/ceph/osd/ceph-3/block Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-3/activate.monmap stderr: got monmap epoch 1 Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-3/keyring --create-keyring --name osd.3 --add-key AQDVMrZcYA8yHRAAFFuwB0MN9xAUVCpNcYRFRA== stdout: creating /var/lib/ceph/osd/ceph-3/keyring added entity osd.3 auth(key=AQDVMrZcYA8yHRAAFFuwB0MN9xAUVCpNcYRFRA==) Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/keyring Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-3/ Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 3 --monmap /var/lib/ceph/osd/ceph-3/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-3/ --osd-uuid beb75dcc-5613-4581-bca0-9c0bd26fd59d --setuser ceph --setgroup ceph stdout: /usr/include/c++/8/bits/stl_vector.h:932: std::vector<_Tp, _Alloc>::reference std::vector<_Tp, _Alloc>::operator[](std::vector<_Tp, _Alloc>::size_type) [with _Tp = long unsigned int; _Alloc = mempool::pool_allocator<(mempool::pool_index_t)1, long unsigned int>; std::vector<_Tp, _Alloc>::reference = long unsigned int&; std::vector<_Tp, _Alloc>::size_type = long unsigned int]: Assertion '__builtin_expect(__n < this->size(), true)' failed. 
stderr: 2019-04-16 19:53:58.694 7f65a0945080 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid stderr: *** Caught signal (Aborted) ** stderr: in thread 7f65a0945080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f659d4d6d80] stderr: 2: (gsignal()+0x10f) [0x7f659c1b193f] stderr: 3: (abort()+0x127) [0x7f659c19bc95] stderr: 4: (()+0x65ca48) [0x557bf6e48a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x557bf7446a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x557bf72f0ae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x557bf73125b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x557bf73459d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x557bf735564f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x557bf6e68d7e] stderr: 11: (main()+0x1bd1) [0x557bf6d610c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f659c19d813] stderr: 13: (_start()+0x2e) [0x557bf6e472fe] stderr: 2019-04-16 19:53:59.216 7f65a0945080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f65a0945080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f659d4d6d80] stderr: 2: (gsignal()+0x10f) [0x7f659c1b193f] stderr: 3: (abort()+0x127) [0x7f659c19bc95] stderr: 4: (()+0x65ca48) [0x557bf6e48a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x557bf7446a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x557bf72f0ae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x557bf73125b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x557bf73459d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x557bf735564f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x557bf6e68d7e] stderr: 11: (main()+0x1bd1) [0x557bf6d610c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f659c19d813] stderr: 13: (_start()+0x2e) [0x557bf6e472fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. stderr: -387> 2019-04-16 19:53:58.694 7f65a0945080 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid stderr: 0> 2019-04-16 19:53:59.216 7f65a0945080 -1 *** Caught signal (Aborted) ** stderr: in thread 7f65a0945080 thread_name:ceph-osd stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable) stderr: 1: (()+0x12d80) [0x7f659d4d6d80] stderr: 2: (gsignal()+0x10f) [0x7f659c1b193f] stderr: 3: (abort()+0x127) [0x7f659c19bc95] stderr: 4: (()+0x65ca48) [0x557bf6e48a48] stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x557bf7446a87] stderr: 6: (BlueStore::_open_alloc()+0x193) [0x557bf72f0ae3] stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x557bf73125b6] stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x557bf73459d7] stderr: 9: (BlueStore::mkfs()+0x141f) [0x557bf735564f] stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x557bf6e68d7e] stderr: 11: (main()+0x1bd1) [0x557bf6d610c1] stderr: 12: (__libc_start_main()+0xf3) [0x7f659c19d813] stderr: 13: (_start()+0x2e) [0x557bf6e472fe] stderr: NOTE: a copy of the executable, or `objdump -rdS ` is needed to interpret this. 
stderr: -387> 2019-04-16 19:53:58.694 7f65a0945080 -1 bluestore(/var/lib/ceph/osd/ceph-3/) _read_fsid unparsable uuid
stderr: 0> 2019-04-16 19:53:59.216 7f65a0945080 -1 *** Caught signal (Aborted) **
stderr: in thread 7f65a0945080 thread_name:ceph-osd
stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
stderr: 1: (()+0x12d80) [0x7f659d4d6d80]
stderr: 2: (gsignal()+0x10f) [0x7f659c1b193f]
stderr: 3: (abort()+0x127) [0x7f659c19bc95]
stderr: 4: (()+0x65ca48) [0x557bf6e48a48]
stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x557bf7446a87]
stderr: 6: (BlueStore::_open_alloc()+0x193) [0x557bf72f0ae3]
stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x557bf73125b6]
stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x557bf73459d7]
stderr: 9: (BlueStore::mkfs()+0x141f) [0x557bf735564f]
stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x557bf6e68d7e]
stderr: 11: (main()+0x1bd1) [0x557bf6d610c1]
stderr: 12: (__libc_start_main()+0xf3) [0x7f659c19d813]
stderr: 13: (_start()+0x2e) [0x557bf6e472fe]
stderr: NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
--> Was unable to complete a new OSD, will rollback changes
Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.3 --yes-i-really-mean-it
stderr: purged osd.3
stdout_lines:
fatal: [e24-h07-740xd.alias.bos.scalelab.redhat.com]: FAILED! => changed=true
cmd:
- ceph-volume
- --cluster
- ceph
- lvm
- batch
- --bluestore
- --yes
- --osds-per-device
- '2'
- /dev/nvme0n1
- /dev/nvme1n1
- /dev/nvme2n1
- /dev/nvme3n1
- /dev/nvme4n1
delta: '0:00:09.701445'
end: '2019-04-16 19:54:04.773178'
msg: non-zero return code
rc: 1
start: '2019-04-16 19:53:55.071733'
stderr: |-
  Traceback (most recent call last):
    File "/sbin/ceph-volume", line 11, in <module>
      load_entry_point('ceph-volume==1.0.0', 'console_scripts', 'ceph-volume')()
    File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__
      self.main(self.argv)
    File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc
      return f(*a, **kw)
    File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main
      terminal.dispatch(self.mapper, subcommand_args)
    File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch
      instance.main()
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main
      terminal.dispatch(self.mapper, self.argv)
    File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch
      instance.main()
    File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
      return func(*a, **kw)
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main
      self.execute()
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute
      self.strategy.execute()
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute
      Create(command).main()
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main
      self.create(args)
    File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
      return func(*a, **kw)
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create
      prepare_step.safe_prepare(args)
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare
      self.prepare()
    File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root
      return func(*a, **kw)
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare
      osd_fsid,
    File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore
      db=db
    File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore
      raise RuntimeError('Command failed with exit code %s: %s' % (returncode, ' '.join(command)))
  RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 7ba34a4b-8994-429c-a9ec-f205b0f5fec1 --setuser ceph --setgroup ceph
stderr_lines:
- 'Traceback (most recent call last):'
- ' File "/sbin/ceph-volume", line 11, in <module>'
- ' load_entry_point(''ceph-volume==1.0.0'', ''console_scripts'', ''ceph-volume'')()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 38, in __init__'
- ' self.main(self.argv)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 59, in newfunc'
- ' return f(*a, **kw)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/main.py", line 148, in main'
- ' terminal.dispatch(self.mapper, subcommand_args)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch'
- ' instance.main()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/main.py", line 40, in main'
- ' terminal.dispatch(self.mapper, self.argv)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/terminal.py", line 182, in dispatch'
- ' instance.main()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root'
- ' return func(*a, **kw)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 325, in main'
- ' self.execute()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/batch.py", line 288, in execute'
- ' self.strategy.execute()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/strategies/bluestore.py", line 124, in execute'
- ' Create(command).main()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 69, in main'
- ' self.create(args)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root'
- ' return func(*a, **kw)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/create.py", line 26, in create'
- ' prepare_step.safe_prepare(args)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 219, in safe_prepare'
- ' self.prepare()'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/decorators.py", line 16, in is_root'
- ' return func(*a, **kw)'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 320, in prepare'
- ' osd_fsid,'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/devices/lvm/prepare.py", line 119, in prepare_bluestore'
- ' db=db'
- ' File "/usr/lib/python3.6/site-packages/ceph_volume/util/prepare.py", line 430, in osd_mkfs_bluestore'
- ' raise RuntimeError(''Command failed with exit code %s: %s'' % (returncode, '' ''.join(command)))'
- 'RuntimeError: Command failed with exit code 250: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 7ba34a4b-8994-429c-a9ec-f205b0f5fec1 --setuser ceph --setgroup ceph'
stdout: |-
  Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2 /dev/nvme0n1
  stdout: Physical volume "/dev/nvme0n1" successfully created.
  stdout: Volume group "ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2" successfully created
  Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-587f32d7-8d1d-4f80-874d-fc0416c250a0 /dev/nvme1n1
  stdout: Physical volume "/dev/nvme1n1" successfully created.
  stdout: Volume group "ceph-587f32d7-8d1d-4f80-874d-fc0416c250a0" successfully created
  Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-56cecd76-1cb0-468a-8595-5c8c7034d9ae /dev/nvme2n1
  stdout: Physical volume "/dev/nvme2n1" successfully created.
  stdout: Volume group "ceph-56cecd76-1cb0-468a-8595-5c8c7034d9ae" successfully created
  Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-b0988122-77ec-4ed6-a692-d1761a3dd5f4 /dev/nvme3n1
  stdout: Physical volume "/dev/nvme3n1" successfully created.
  stdout: Volume group "ceph-b0988122-77ec-4ed6-a692-d1761a3dd5f4" successfully created
  Running command: /usr/sbin/vgcreate -s 1G --force --yes ceph-029f1dd0-4b38-4e8b-9bde-ce718cf8fbab /dev/nvme4n1
  stdout: Physical volume "/dev/nvme4n1" successfully created.
  stdout: Volume group "ceph-029f1dd0-4b38-4e8b-9bde-ce718cf8fbab" successfully created
  Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-abb6b2ce-e27d-4041-bb3c-0b3acf19dc67 ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2
  stdout: Logical volume "osd-data-abb6b2ce-e27d-4041-bb3c-0b3acf19dc67" created.
  Running command: /usr/sbin/lvcreate --yes -l 372 -n osd-data-6c7b9193-109d-45fa-8473-9c722104518d ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2
  stdout: Logical volume "osd-data-6c7b9193-109d-45fa-8473-9c722104518d" created.
  Running command: /bin/ceph-authtool --gen-print-key
  Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 7ba34a4b-8994-429c-a9ec-f205b0f5fec1
  Running command: /bin/ceph-authtool --gen-print-key
  Running command: /bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-2
  Running command: /usr/sbin/restorecon /var/lib/ceph/osd/ceph-2
  Running command: /bin/chown -h ceph:ceph /dev/ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2/osd-data-abb6b2ce-e27d-4041-bb3c-0b3acf19dc67
  Running command: /bin/chown -R ceph:ceph /dev/dm-3
  Running command: /bin/ln -s /dev/ceph-aef75bc3-aec3-472c-90b3-b4c1395335e2/osd-data-abb6b2ce-e27d-4041-bb3c-0b3acf19dc67 /var/lib/ceph/osd/ceph-2/block
  Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-2/activate.monmap
  stderr: got monmap epoch 1
  Running command: /bin/ceph-authtool /var/lib/ceph/osd/ceph-2/keyring --create-keyring --name osd.2 --add-key AQDVMrZcZCWxHBAAae9w2FcYOVwYZJj1g3iN3Q==
  stdout: creating /var/lib/ceph/osd/ceph-2/keyring
  added entity osd.2 auth(key=AQDVMrZcZCWxHBAAae9w2FcYOVwYZJj1g3iN3Q==)
  Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/keyring
  Running command: /bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-2/
  Running command: /bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 2 --monmap /var/lib/ceph/osd/ceph-2/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-2/ --osd-uuid 7ba34a4b-8994-429c-a9ec-f205b0f5fec1 --setuser ceph --setgroup ceph
  stdout: /usr/include/c++/8/bits/stl_vector.h:932: std::vector<_Tp, _Alloc>::reference std::vector<_Tp, _Alloc>::operator[](std::vector<_Tp, _Alloc>::size_type) [with _Tp = long unsigned int; _Alloc = mempool::pool_allocator<(mempool::pool_index_t)1, long unsigned int>; std::vector<_Tp, _Alloc>::reference = long unsigned int&; std::vector<_Tp, _Alloc>::size_type = long unsigned int]: Assertion '__builtin_expect(__n < this->size(), true)' failed.
  stderr: 2019-04-16 19:54:03.669 7ff173cdb080 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
  stderr: *** Caught signal (Aborted) **
  stderr: in thread 7ff173cdb080 thread_name:ceph-osd
  stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
  stderr: 1: (()+0x12d80) [0x7ff17086cd80]
  stderr: 2: (gsignal()+0x10f) [0x7ff16f54793f]
  stderr: 3: (abort()+0x127) [0x7ff16f531c95]
  stderr: 4: (()+0x65ca48) [0x5608ead2ea48]
  stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x5608eb32ca87]
  stderr: 6: (BlueStore::_open_alloc()+0x193) [0x5608eb1d6ae3]
  stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x5608eb1f85b6]
  stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x5608eb22b9d7]
  stderr: 9: (BlueStore::mkfs()+0x141f) [0x5608eb23b64f]
  stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x5608ead4ed7e]
  stderr: 11: (main()+0x1bd1) [0x5608eac470c1]
  stderr: 12: (__libc_start_main()+0xf3) [0x7ff16f533813]
  stderr: 13: (_start()+0x2e) [0x5608ead2d2fe]
  stderr: 2019-04-16 19:54:04.192 7ff173cdb080 -1 *** Caught signal (Aborted) **
  stderr: in thread 7ff173cdb080 thread_name:ceph-osd
  stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
  stderr: 1: (()+0x12d80) [0x7ff17086cd80]
  stderr: 2: (gsignal()+0x10f) [0x7ff16f54793f]
  stderr: 3: (abort()+0x127) [0x7ff16f531c95]
  stderr: 4: (()+0x65ca48) [0x5608ead2ea48]
  stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x5608eb32ca87]
  stderr: 6: (BlueStore::_open_alloc()+0x193) [0x5608eb1d6ae3]
  stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x5608eb1f85b6]
  stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x5608eb22b9d7]
  stderr: 9: (BlueStore::mkfs()+0x141f) [0x5608eb23b64f]
  stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x5608ead4ed7e]
  stderr: 11: (main()+0x1bd1) [0x5608eac470c1]
  stderr: 12: (__libc_start_main()+0xf3) [0x7ff16f533813]
  stderr: 13: (_start()+0x2e) [0x5608ead2d2fe]
  stderr: NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
  stderr: -387> 2019-04-16 19:54:03.669 7ff173cdb080 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
  stderr: 0> 2019-04-16 19:54:04.192 7ff173cdb080 -1 *** Caught signal (Aborted) **
  stderr: in thread 7ff173cdb080 thread_name:ceph-osd
  stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
  stderr: 1: (()+0x12d80) [0x7ff17086cd80]
  stderr: 2: (gsignal()+0x10f) [0x7ff16f54793f]
  stderr: 3: (abort()+0x127) [0x7ff16f531c95]
  stderr: 4: (()+0x65ca48) [0x5608ead2ea48]
  stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x5608eb32ca87]
  stderr: 6: (BlueStore::_open_alloc()+0x193) [0x5608eb1d6ae3]
  stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x5608eb1f85b6]
  stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x5608eb22b9d7]
  stderr: 9: (BlueStore::mkfs()+0x141f) [0x5608eb23b64f]
  stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x5608ead4ed7e]
  stderr: 11: (main()+0x1bd1) [0x5608eac470c1]
  stderr: 12: (__libc_start_main()+0xf3) [0x7ff16f533813]
  stderr: 13: (_start()+0x2e) [0x5608ead2d2fe]
  stderr: NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
  stderr: -387> 2019-04-16 19:54:03.669 7ff173cdb080 -1 bluestore(/var/lib/ceph/osd/ceph-2/) _read_fsid unparsable uuid
  stderr: 0> 2019-04-16 19:54:04.192 7ff173cdb080 -1 *** Caught signal (Aborted) **
  stderr: in thread 7ff173cdb080 thread_name:ceph-osd
  stderr: ceph version 14.2.0-142-g2f9c072 (2f9c0720b5aed4c9e25e8b050e71856df0a986ad) nautilus (stable)
  stderr: 1: (()+0x12d80) [0x7ff17086cd80]
  stderr: 2: (gsignal()+0x10f) [0x7ff16f54793f]
  stderr: 3: (abort()+0x127) [0x7ff16f531c95]
  stderr: 4: (()+0x65ca48) [0x5608ead2ea48]
  stderr: 5: (BitmapAllocator::init_add_free(unsigned long, unsigned long)+0x857) [0x5608eb32ca87]
  stderr: 6: (BlueStore::_open_alloc()+0x193) [0x5608eb1d6ae3]
  stderr: 7: (BlueStore::_open_db_and_around(bool)+0xa6) [0x5608eb1f85b6]
  stderr: 8: (BlueStore::_fsck(bool, bool)+0x587) [0x5608eb22b9d7]
  stderr: 9: (BlueStore::mkfs()+0x141f) [0x5608eb23b64f]
  stderr: 10: (OSD::mkfs(CephContext*, ObjectStore*, uuid_d, int)+0x1ae) [0x5608ead4ed7e]
  stderr: 11: (main()+0x1bd1) [0x5608eac470c1]
  stderr: 12: (__libc_start_main()+0xf3) [0x7ff16f533813]
  stderr: 13: (_start()+0x2e) [0x5608ead2d2fe]
  stderr: NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
  --> Was unable to complete a new OSD, will rollback changes
  Running command: /bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.2 --yes-i-really-mean-it
  stderr: purged osd.2
stdout_lines:

NO MORE HOSTS LEFT *************************************************************

PLAY RECAP *********************************************************************
e23-h05-740xd.alias.bos.scalelab.redhat.com : ok=92 changed=5 unreachable=0 failed=1
e23-h07-740xd.alias.bos.scalelab.redhat.com : ok=90 changed=5 unreachable=0 failed=1
e24-h05-740xd.alias.bos.scalelab.redhat.com : ok=90 changed=4 unreachable=0 failed=1
e24-h07-740xd.alias.bos.scalelab.redhat.com : ok=90 changed=5 unreachable=0 failed=1
e24-h17-740xd.alias.bos.scalelab.redhat.com : ok=156 changed=6 unreachable=0 failed=0
e24-h19-740xd.alias.bos.scalelab.redhat.com : ok=144 changed=4 unreachable=0 failed=0
e24-h21-740xd.alias.bos.scalelab.redhat.com : ok=145 changed=4 unreachable=0 failed=0

INSTALLER STATUS ***************************************************************
Install Ceph Monitor : Complete (0:01:11)
Install Ceph Manager : Complete (0:00:23)
Install Ceph OSD : In Progress (0:00:53)
	This phase can be restarted by running: roles/ceph-osd/tasks/main.yml

Tuesday 16 April 2019 19:54:04 +0000 (0:00:09.932) 0:03:13.124 *********
===============================================================================
ceph-handler : restart ceph mon daemon(s) - non container --------------- 31.76s
/usr/share/ceph-ansible/roles/ceph-handler/handlers/main.yml:29 ----------------
ceph-config : generate ceph configuration file: ceph.conf --------------- 13.93s
/usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:83 --------------------
ceph-config : generate ceph configuration file: ceph.conf --------------- 10.82s
/usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:83 --------------------
ceph-osd : use ceph-volume lvm batch to create bluestore osds ------------ 9.93s
/usr/share/ceph-ansible/roles/ceph-osd/tasks/scenarios/lvm-batch.yml:3 ---------
gather and delegate facts ------------------------------------------------ 5.93s
/usr/share/ceph-ansible/site.yml:38 --------------------------------------------
ceph-validate : validate provided configuration -------------------------- 3.24s
/usr/share/ceph-ansible/roles/ceph-validate/tasks/main.yml:2 -------------------
ceph-infra : start firewalld --------------------------------------------- 2.29s
/usr/share/ceph-ansible/roles/ceph-infra/tasks/configure_firewall.yml:16 -------
ceph-mon : create ceph mgr keyring(s) ------------------------------------ 1.98s
/usr/share/ceph-ansible/roles/ceph-mon/tasks/ceph_keys.yml:33 ------------------
ceph-facts : set_fact ceph_current_status (convert to json) -------------- 1.57s
/usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:84 --------------------
ceph-facts : get default crush rule value from ceph configuration -------- 1.29s
/usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:257 -------------------
ceph-mgr : systemd start mgr --------------------------------------------- 1.28s
/usr/share/ceph-ansible/roles/ceph-mgr/tasks/start_mgr.yml:33 ------------------
ceph-handler : check for a ceph mon socket ------------------------------- 1.24s
/usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:2 ---
ceph-handler : check for a ceph osd socket ------------------------------- 1.21s
/usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:30 ---
ceph-infra : disable ntpd ------------------------------------------------ 1.19s
/usr/share/ceph-ansible/roles/ceph-infra/handlers/main.yml:2 -------------------
ceph-handler : check for a ceph mgr socket ------------------------------- 1.18s
/usr/share/ceph-ansible/roles/ceph-handler/tasks/check_socket_non_container.yml:117 ---
ceph-common : install redhat dependencies -------------------------------- 1.14s
/usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml:2 ---
ceph-config : run 'ceph-volume lvm batch --report' to see how many osds are to be created --- 1.08s
/usr/share/ceph-ansible/roles/ceph-config/tasks/main.yml:22 --------------------
ceph-common : install redhat dependencies -------------------------------- 1.06s
/usr/share/ceph-ansible/roles/ceph-common/tasks/installs/install_redhat_packages.yml:2 ---
ceph-osd : install dependencies ------------------------------------------ 1.00s
/usr/share/ceph-ansible/roles/ceph-osd/tasks/main.yml:5 ------------------------
ceph-facts : set_fact ceph_current_status (convert to json) -------------- 1.00s
/usr/share/ceph-ansible/roles/ceph-facts/tasks/facts.yml:84 --------------------
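For context on the abort itself: both failed mkfs runs die on the libstdc++ bounds assertion from /usr/include/c++/8/bits/stl_vector.h quoted above ("Assertion '__builtin_expect(__n < this->size(), true)' failed."), raised from inside BitmapAllocator::init_add_free, i.e. ceph-osd indexes a std::vector past its end while opening the allocator during BlueStore::mkfs. As a minimal stand-alone sketch of that failure mode (hypothetical demo code, not taken from the Ceph sources; it only assumes the binary was built with libstdc++ assertions enabled, as the distro hardening flag -D_GLIBCXX_ASSERTIONS does):

// demo.cpp - hypothetical example, not Ceph code.
// With -D_GLIBCXX_ASSERTIONS, an out-of-range std::vector::operator[] does not
// silently read past the end; libstdc++ prints an assertion message of the same
// form seen in the ceph-osd --mkfs output above and calls abort() (SIGABRT).
//
//   g++ -D_GLIBCXX_ASSERTIONS demo.cpp -o demo && ./demo
#include <vector>

int main() {
    std::vector<unsigned long> blocks(4);   // only indices 0..3 are valid
    unsigned long value = blocks[10];       // out of range -> assertion failure, abort()
    return static_cast<int>(value);
}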