Bug #56886

open

ceph orch daemon add osd cannot be applied to a partition of a device, but the ceph-volume lvm create command can

Added by xiaoliang yang over 1 year ago. Updated about 1 year ago.

Status: New
Priority: Normal
Assignee: -
Category: orchestrator
Target version: -
% Done: 0%
Source:
Tags: v16.2.7
Backport:
Regression: No
Severity: 3 - minor
Reviewed:
Affected Versions:
ceph-qa-suite: ceph-disk
Pull request ID:
Crash signature (v1):
Crash signature (v2):

Description

[root@node1 ~]# ceph orch daemon add osd node1:/dev/vdb1
Error EINVAL: Traceback (most recent call last):
File "/usr/share/ceph/mgr/mgr_module.py", line 1384, in _handle_command
return self.handle_command(inbuf, cmd)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 168, in handle_command
return dispatch[cmd['prefix']].call(self, cmd, inbuf)
File "/usr/share/ceph/mgr/mgr_module.py", line 397, in call
return self.func(mgr, **kwargs)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 107, in <lambda>
wrapper_copy = lambda *l_args, **l_kwargs: wrapper(*l_args, **l_kwargs) # noqa: E731
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 96, in wrapper
return func(*args, **kwargs)
File "/usr/share/ceph/mgr/orchestrator/module.py", line 818, in _daemon_add_osd
raise_if_exception(completion)
File "/usr/share/ceph/mgr/orchestrator/_interface.py", line 225, in raise_if_exception
raise e
RuntimeError: cephadm exited with an error code: 1, stderr:Inferring config /var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/mon.node1/config
Non-zero exit code 2 from /bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:00965b7e88c0cef116e6a47107051a4bfe952139e7b94c6cefd6607cf38a3f0f -e NODE_NAME=node1 -e CEPH_USE_RANDOM_NONCE=1 -e CEPH_VOLUME_OSDSPEC_AFFINITY=None -v /var/run/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d:/var/run/ceph:z -v /var/log/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d:/var/log/ceph:z -v /var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/crash:/var/lib/ceph/crash:z -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /tmp/ceph-tmptdgpm2ss:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp87slpl09:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.io/ceph/ceph@sha256:00965b7e88c0cef116e6a47107051a4bfe952139e7b94c6cefd6607cf38a3f0f lvm batch --no-auto /dev/vdb1 --yes --no-systemd
/bin/podman: stderr usage: ceph-volume lvm batch [-h] [--db-devices [DB_DEVICES [DB_DEVICES ...]]]
/bin/podman: stderr [--wal-devices [WAL_DEVICES [WAL_DEVICES ...]]]
/bin/podman: stderr [--journal-devices [JOURNAL_DEVICES [JOURNAL_DEVICES ...]]]
/bin/podman: stderr [--auto] [--no-auto] [--bluestore] [--filestore]
/bin/podman: stderr [--report] [--yes]
/bin/podman: stderr [--format {json,json-pretty,pretty}] [--dmcrypt]
/bin/podman: stderr [--crush-device-class CRUSH_DEVICE_CLASS]
/bin/podman: stderr [--no-systemd]
/bin/podman: stderr [--osds-per-device OSDS_PER_DEVICE]
/bin/podman: stderr [--data-slots DATA_SLOTS]
/bin/podman: stderr [--block-db-size BLOCK_DB_SIZE]
/bin/podman: stderr [--block-db-slots BLOCK_DB_SLOTS]
/bin/podman: stderr [--block-wal-size BLOCK_WAL_SIZE]
/bin/podman: stderr [--block-wal-slots BLOCK_WAL_SLOTS]
/bin/podman: stderr [--journal-size JOURNAL_SIZE]
/bin/podman: stderr [--journal-slots JOURNAL_SLOTS] [--prepare]
/bin/podman: stderr [--osd-ids [OSD_IDS [OSD_IDS ...]]]
/bin/podman: stderr [DEVICES [DEVICES ...]]
/bin/podman: stderr ceph-volume lvm batch: error: /dev/vdb1 is a partition, please pass LVs or raw block devices
Traceback (most recent call last):
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 8571, in <module>
main()
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 8559, in main
r = ctx.func(ctx)
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 1737, in _infer_config
return func(ctx)
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 1678, in _infer_fsid
return func(ctx)
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 1765, in _infer_image
return func(ctx)
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 1665, in _validate_fsid
return func(ctx)
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 4822, in command_ceph_volume
out, err, code = call_throws(ctx, c.run_cmd())
File "/var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/cephadm.55e70975756e8c180366666f9fa21d3301c67edc3a5000698fd6e7ccb6fcafee", line 1467, in call_throws
raise RuntimeError('Failed command: %s' % ' '.join(command))
RuntimeError: Failed command: /bin/podman run --rm --ipc=host --stop-signal=SIGTERM --net=host --entrypoint /usr/sbin/ceph-volume --privileged --group-add=disk --init -e CONTAINER_IMAGE=quay.io/ceph/ceph@sha256:00965b7e88c0cef116e6a47107051a4bfe952139e7b94c6cefd6607cf38a3f0f -e NODE_NAME=node1 -e CEPH_USE_RANDOM_NONCE=1 -e CEPH_VOLUME_OSDSPEC_AFFINITY=None -v /var/run/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d:/var/run/ceph:z -v /var/log/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d:/var/log/ceph:z -v /var/lib/ceph/ba757a9a-01d9-11ed-b2f7-fa163e4c5d9d/crash:/var/lib/ceph/crash:z -v /dev:/dev -v /run/udev:/run/udev -v /sys:/sys -v /run/lvm:/run/lvm -v /run/lock/lvm:/run/lock/lvm -v /tmp/ceph-tmptdgpm2ss:/etc/ceph/ceph.conf:z -v /tmp/ceph-tmp87slpl09:/var/lib/ceph/bootstrap-osd/ceph.keyring:z quay.io/ceph/ceph@sha256:00965b7e88c0cef116e6a47107051a4bfe952139e7b94c6cefd6607cf38a3f0f lvm batch --no-auto /dev/vdb1 --yes --no-systemd
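
The failure comes from ceph-volume lvm batch, which cephadm runs behind ceph orch daemon add osd and which rejects partitions outright ("please pass LVs or raw block devices"). A possible workaround, sketched below with placeholder VG/LV names, is to put LVM on the partition by hand and then pass the resulting LV to the orchestrator, since LV paths are accepted:

# Sketch of a manual workaround; the VG/LV names are illustrative placeholders.
pvcreate /dev/vdb1
vgcreate cephvg-vdb1 /dev/vdb1
lvcreate -l 100%FREE -n osd-data-vdb1 cephvg-vdb1
# An LV path is accepted where the bare partition is not:
ceph orch daemon add osd node1:/dev/cephvg-vdb1/osd-data-vdb1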

#1

Updated by xiaoliang yang over 1 year ago

[root@node1 ~]# ceph-volume lvm prepare --data /dev/vdb1
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring -i - osd new 600663fb-6279-4c1b-b97f-f9b863590888
Running command: /usr/sbin/vgcreate --force --yes ceph-0b825100-65fb-4c87-9b74-ae999cf43de2 /dev/vdb1
stdout: Volume group "ceph-0b825100-65fb-4c87-9b74-ae999cf43de2" successfully created
Running command: /usr/sbin/lvcreate --yes -l 1279 -n osd-block-600663fb-6279-4c1b-b97f-f9b863590888 ceph-0b825100-65fb-4c87-9b74-ae999cf43de2
stdout: Logical volume "osd-block-600663fb-6279-4c1b-b97f-f9b863590888" created.
Running command: /usr/bin/ceph-authtool --gen-print-key
Running command: /usr/bin/mount -t tmpfs tmpfs /var/lib/ceph/osd/ceph-0
Running command: /usr/bin/chown -h ceph:ceph /dev/ceph-0b825100-65fb-4c87-9b74-ae999cf43de2/osd-block-600663fb-6279-4c1b-b97f-f9b863590888
Running command: /usr/bin/chown -R ceph:ceph /dev/dm-2
Running command: /usr/bin/ln -s /dev/ceph-0b825100-65fb-4c87-9b74-ae999cf43de2/osd-block-600663fb-6279-4c1b-b97f-f9b863590888 /var/lib/ceph/osd/ceph-0/block
Running command: /usr/bin/ceph --cluster ceph --name client.bootstrap-osd --keyring /var/lib/ceph/bootstrap-osd/ceph.keyring mon getmap -o /var/lib/ceph/osd/ceph-0/activate.monmap
stderr: got monmap epoch 3
Running command: /usr/bin/ceph-authtool /var/lib/ceph/osd/ceph-0/keyring --create-keyring --name osd.0 --add-key AQBd8+FiLrg5MRAA862tgxwCcvEFJlKtHVwGYQ==
stdout: creating /var/lib/ceph/osd/ceph-0/keyring
added entity osd.0 auth(key=AQBd8+FiLrg5MRAA862tgxwCcvEFJlKtHVwGYQ==)
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/keyring
Running command: /usr/bin/chown -R ceph:ceph /var/lib/ceph/osd/ceph-0/
Running command: /usr/bin/ceph-osd --cluster ceph --osd-objectstore bluestore --mkfs -i 0 --monmap /var/lib/ceph/osd/ceph-0/activate.monmap --keyfile - --osd-data /var/lib/ceph/osd/ceph-0/ --osd-uuid 600663fb-6279-4c1b-b97f-f9b863590888 --setuser ceph --setgroup ceph
stderr: 2022-07-28T10:24:32.966+0800 7fa0fa7b6080 -1 bluestore(/var/lib/ceph/osd/ceph-0/) _read_fsid unparsable uuid
--> ceph-volume lvm prepare successful for: /dev/vdb1

[root@node1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
vda 252:0 0 120G 0 disk
├─vda1 252:1 0 1G 0 part /boot
├─vda2 252:2 0 12G 0 part
│ ├─uniontechos-root 253:0 0 117.8G 0 lvm /
│ └─uniontechos-swap 253:1 0 1G 0 lvm [SWAP]
└─vda3 252:3 0 107G 0 part
└─uniontechos-root 253:0 0 117.8G 0 lvm /
vdb 252:16 0 32G 0 disk
├─vdb1 252:17 0 5G 0 part
│ └─ceph--0b825100--65fb--4c87--9b74--ae999cf43de2-osd--block--600663fb--6279--4c1b--b97f--f9b863590888 253:2 0 5G 0 lvm
├─vdb2 252:18 0 5G 0 part
├─vdb3 252:19 0 5G 0 part
├─vdb4 252:20 0 1K 0 part
├─vdb5 252:21 0 5G 0 part
├─vdb6 252:22 0 5G 0 part
├─vdb7 252:23 0 5G 0 part
└─vdb8 252:24 0 2G 0 part
vdc 252:32 0 32G 0 disk

[root@node1 ~]# ceph orch daemon add osd node1:/dev/ceph-0b825100-65fb-4c87-9b74-ae999cf43de2/osd-block-600663fb-6279-4c1b-b97f-f9b863590888
Created osd(s) 0 on host 'node1'
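
For reference, a few read-only checks (a sketch, assuming the same v16.x cephadm cluster) should confirm that the LV-backed OSD is up and managed by the orchestrator:

ceph orch ps --daemon-type osd   # the new osd.0 container should be listed on node1
ceph osd tree                    # osd.0 should appear under host node1
cephadm ceph-volume lvm list     # run on the host; lists the LV backing osd.0 via a container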

#2

Updated by Ilya Dryomov about 1 year ago

  • Target version deleted (v16.2.11)
