Bug #51025

open

ceph cluster: adding a ramdisk as an OSD fails (RuntimeError: command returned non-zero exit status: 5)

Added by tom zhang almost 3 years ago.

Status: New
Priority: Normal
Assignee: -
Target version:
% Done: 0%
Source:
Tags:
Backport:
Regression: No
Severity: 3 - minor
Reviewed:
Affected Versions:
ceph-qa-suite:
Pull request ID:
Crash signature (v1):
Crash signature (v2):

Description

I want to use a ramdisk as an OSD, but adding it fails.
Can you give me some suggestions?

ceph version 16.2.3 (381b476cb3900f9a92eb95d03b4850b953cfd79a) pacific (stable)

root@u20node1:/etc/ceph# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
loop0 7:0 0 219M 1 loop /snap/gnome-3-34-1804/66
loop1 7:1 0 240.8M 1 loop /snap/gnome-3-34-1804/24
loop2 7:2 0 62.1M 1 loop /snap/gtk-common-themes/1506
loop3 7:3 0 55.5M 1 loop /snap/core18/1997
loop4 7:4 0 49.8M 1 loop /snap/snap-store/433
loop5 7:5 0 65.1M 1 loop /snap/gtk-common-themes/1515
loop6 7:6 0 55.4M 1 loop /snap/core18/2066
loop7 7:7 0 32.3M 1 loop /snap/snapd/11588
loop8 7:8 0 32.1M 1 loop /snap/snapd/11841
loop9 7:9 0 51M 1 loop /snap/snap-store/518
sda 8:0 0 30G 0 disk
├─sda1 8:1 0 512M 0 part /boot/efi
├─sda2 8:2 0 1K 0 part
└─sda5 8:5 0 29.5G 0 part /
zram0 252:0 0 6.4G 0 disk
zram1 252:1 0 6.4G 0 disk
zram2 252:2 0 6.4G 0 disk
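
For context, zram block devices like the three shown above are normally created through the zram kernel module and its sysfs interface. A minimal sketch (the device count and sizes here are assumptions for illustration, not taken from this host's configuration):

modprobe zram num_devices=3            # load the zram module with 3 devices (assumed count)
echo 6G > /sys/block/zram0/disksize    # set the uncompressed size of each device (assumed size)
echo 6G > /sys/block/zram1/disksize
echo 6G > /sys/block/zram2/disksize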

root@u20node1:/etc/ceph# ceph -s
  cluster:
    id:     1b149c98-bf8f-11eb-9a42-a118eae474c1
    health: HEALTH_WARN
            OSD count 0 < osd_pool_default_size 3

  services:
    mon: 1 daemons, quorum u20node1 (age 10m)
    mgr: u20node1.yxfpth(active, since 10m)
    osd: 0 osds: 0 up, 0 in (since 40m)

  data:
    pools:   0 pools, 0 pgs
    objects: 0 objects, 0 B
    usage:   0 B used, 0 B / 0 B avail
    pgs:

The log output is below:

root@u20node1:~# ceph orch daemon add osd u20node1:/dev/zram0

Error EINVAL: Traceback (most recent call last):
File "/usr/share/ceph/mgr/mgr_module.py", line 1335, in _handle_command
return self.handle_command(inbuf, cmd)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 167, in handle_command
return dispatch[cmd['prefix']].call(self, cmd, inbuf)
File "/usr/share/ceph/mgr/mgr_module.py", line 389, in call
return self.func(mgr, **kwargs)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 107, in <lambda>
wrapper_copy = lambda *l_args, **l_kwargs: wrapper(*l_args, **l_kwargs) # noqa: E731
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 96, in wrapper
return func(*args, **kwargs)
File "/usr/share/ceph/mgr/orchestrator/module.py", line 795, in _daemon_add_osd
raise_if_exception(completion)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 224, in raise_if_exception
raise e
RuntimeError: cephadm exited with an error code: 1, stderr:/usr/bin/docker: --> passed data devices: 1 physical, 0 LVM
/usr/bin/docker: --> relative data size: 1.0
/usr/bin/docker: Running command: /usr/bin/ceph-authtool --gen-print-key
/usr/bin/docker: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 7bce40b1-1ec7-4c7b-b002-c6c4f1f4bcfd
/usr/bin/docker: Running command: /usr/sbin/vgcreate --force --yes ceph-bc5ab7ee-ccb7-41d4-8531-bbeacab4826d /dev/zram0
/usr/bin/docker: stderr: Device /dev/zram0 excluded by a filter.
/usr/bin/docker: --> Was unable to complete a new OSD, will rollback changes
/usr/bin/docker: Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring osd purge-new osd.0 --yes-i-really-mean-it
/usr/bin/docker: stderr: purged osd.0
/usr/bin/docker: --> RuntimeError: command returned non-zero exit status: 5
Traceback (most recent call last):
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 8029, in <module>
main()
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 8017, in main
r = ctx.func(ctx)
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 1654, in _infer_fsid
return func(ctx)
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 1738, in _infer_image
return func(ctx)
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 4514, in command_ceph_volume
out, err, code = call_throws(ctx, c.run_cmd(), verbosity=verbosity)
File "/var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/cephadm.30cb78bdbbafb384af862e1c2292b944f15942b586128e91262b43e91e11ae90", line 1464, in call_throws
raise RuntimeError('Failed command: %s' % ' '.join(command))
RuntimeError: Failed command: /usr/bin/docker run --rm --ipc=host --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=docker.io/ceph/ceph@sha256:54e95ae1e11404157d7b329d0bef866ebbb214b195a009e87aae4eba9d282949 -e NODE_NAME=u20node1 -e CEPH_USE_RANDOM_NONCE=1 -e CEPH_VOLUME_OSDSPEC_AFFINITY=None -v /var/run/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1:/var/run/ceph:z -v /var/log/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1:/var/log/ceph:z -v /var/lib/ceph/1b149c98-bf8f-11eb-9a42-a118eae474c1/crash:/var/lib/ceph/crash:z -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /tmp/ceph-tmp1yd0tbn1:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmpco9uppjf:/var/lib/ceph/bootstrap-osd/ceph.keyring:z docker.io/ceph/ceph@sha256:54e95ae1e11404157d7b329d0bef866ebbb214b195a009e87aae4eba9d282949 lvm batch --no-auto /dev/zram0 --yes --no-systemd
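
The step that actually fails is the vgcreate on /dev/zram0: "Device /dev/zram0 excluded by a filter" is LVM rejecting the device before ceph-volume can build the OSD on it. One way to narrow this down is to test LVM against the device directly; a minimal sketch, assuming the rejection comes from LVM's device filter or device-type settings (the lvm.conf fragment below is an assumption, not something verified on this setup). Note that ceph-volume runs vgcreate inside the cephadm container, so the LVM configuration seen there can differ from the host's.

pvcreate -vvv /dev/zram0                     # verbose output names the filter that rejects the device
grep -nE 'filter|types' /etc/lvm/lvm.conf    # inspect the configured filter/global_filter/types settings
# If zram is not an accepted device type, lvm.conf can list it explicitly (assumed workaround):
#   devices {
#       types = [ "zram", 16 ]
#   }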
