[ubuntu@vpm100 src]$ git describe
v0.93-258-g8b991ef
[ubuntu@vpm100 src]$ uname -a
Linux vpm100.front.sepia.ceph.com 3.10.0-123.el7.x86_64 #1 SMP Mon May 5 11:16:57 EDT 2014 x86_64 x86_64 x86_64 GNU/Linux
[ubuntu@vpm100 src]$ lsb_release -a
LSB Version:    :core-4.1-amd64:core-4.1-noarch:cxx-4.1-amd64:cxx-4.1-noarch:desktop-4.1-amd64:desktop-4.1-noarch:languages-4.1-amd64:languages-4.1-noarch:printing-4.1-amd64:printing-4.1-noarch
Distributor ID: RedHatEnterpriseServer
Description:    Red Hat Enterprise Linux Server release 7.0 (Maipo)
Release:        7.0
Codename:       Maipo
[ubuntu@vpm100 src]$ sudo test/ceph-disk.sh test_activate_dev
+ source test/test_btrfs_common.sh
+ PS4='${FUNCNAME[0]}: $LINENO: '
: 24: export PATH=:/sbin:/bin:/usr/sbin:/usr/bin
: 24: PATH=:/sbin:/bin:/usr/sbin:/usr/bin
: 25: DIR=test-ceph-disk
: 26: OSD_DATA=test-ceph-disk/osd
: 27: MON_ID=a
: 28: MONA=127.0.0.1:7451
: 29: TEST_POOL=rbd
:: 30: uuidgen
: 30: FSID=a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
: 31: export CEPH_CONF=test-ceph-disk/ceph.conf
: 31: CEPH_CONF=test-ceph-disk/ceph.conf
: 32: export 'CEPH_ARGS=--fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5'
: 32: CEPH_ARGS='--fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5'
: 33: CEPH_ARGS+=' --chdir='
: 34: CEPH_ARGS+=' --run-dir=test-ceph-disk'
: 35: CEPH_ARGS+=' --osd-failsafe-full-ratio=.99'
: 36: CEPH_ARGS+=' --mon-host=127.0.0.1:7451'
: 37: CEPH_ARGS+=' --log-file=test-ceph-disk/$name.log'
: 38: CEPH_ARGS+=' --pid-file=test-ceph-disk/$name.pidfile'
: 39: CEPH_ARGS+=' --osd-pool-default-erasure-code-directory=.libs'
: 40: CEPH_ARGS+=' --auth-supported=none'
: 41: CEPH_ARGS+=' --osd-journal-size=100'
: 42: CEPH_DISK_ARGS=
: 43: CEPH_DISK_ARGS+=' --statedir=test-ceph-disk'
: 44: CEPH_DISK_ARGS+=' --sysconfdir=test-ceph-disk'
: 45: CEPH_DISK_ARGS+=' --prepend-to-path='
: 46: CEPH_DISK_ARGS+=' --verbose'
: 47: TIMEOUT=360
:: 49: which cat
: 49: cat=/bin/cat
:: 50: which timeout
: 50: timeout=/bin/timeout
:: 51: which diff
: 51: diff=/bin/diff
:: 52: which mkdir
: 52: mkdir=/bin/mkdir
:: 53: which rm
: 53: rm=/bin/rm
:: 54: which uuidgen
: 54: uuidgen=/bin/uuidgen
: 524: run test_activate_dev
run: 508: local default_actions
run: 509: default_actions+='test_path '
run: 510: default_actions+='test_no_path '
run: 511: default_actions+='test_find_cluster_by_uuid '
run: 512: default_actions+='test_prepend_to_path '
run: 513: default_actions+='test_activate_dir_magic '
run: 514: default_actions+='test_activate_dir '
run: 515: default_actions+='test_keyring_path '
run: 516: local actions=test_activate_dev
run: 517: for action in '$actions'
run: 518: setup
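For reference, the setup/teardown machinery traced below reduces to roughly the following. This is a reconstruction from the trace's own function names and script line numbers, not a copy of test/ceph-disk.sh; the btrfs helper name is an assumption based on the sourced test/test_btrfs_common.sh.

    setup() {
        teardown
        mkdir $DIR
        mkdir $OSD_DATA
        touch $DIR/ceph.conf    # empty; all options arrive via $CEPH_ARGS
    }

    teardown() {
        kill_daemons
        # extra cleanup is only needed when the working directory is btrfs
        if [ $(stat -f -c '%T' .) == "btrfs" ] ; then
            teardown_btrfs $DIR    # assumed helper from test/test_btrfs_common.sh
        fi
        rm -fr $DIR
    }

    kill_daemons() {
        for pidfile in $(find $DIR | grep pidfile) ; do
            pid=$(cat $pidfile)
            # retry with growing pauses; stop as soon as kill fails,
            # i.e. the process is already gone
            for try in 0 1 1 1 2 3 ; do
                kill $pid || break
                sleep $try
            done
        done
    }
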
setup: 57: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
find: ‘test-ceph-disk’: No such file or directory
kkill_daemons: 93: grep pidfile
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
setup: 58: mkdir test-ceph-disk
setup: 59: mkdir test-ceph-disk/osd
setup: 61: touch test-ceph-disk/ceph.conf
run: 519: test_activate_dev
ttest_activate_dev: 383: id -u
test_activate_dev: 383: test 0 '!=' 0
ttest_activate_dev: 388: create_dev vdf.disk
ccreate_dev: 302: local name=vdf.disk
ccreate_dev: 304: set -x
ccreate_dev: 305: echo create_dev vdf.disk
create_dev vdf.disk
ccreate_dev: 306: dd if=/dev/zero of=vdf.disk bs=1024k count=400
400+0 records in
400+0 records out
419430400 bytes (419 MB) copied, 0,352537 s, 1,2 GB/s
ccreate_dev: 307: losetup --find vdf.disk
cccreate_dev: 308: losetup --associated vdf.disk
cccreate_dev: 308: cut -f1 -d:
ccreate_dev: 308: local dev=/dev/loop0
ccreate_dev: 309: ceph-disk zap /dev/loop0
ccreate_dev: 310: echo /dev/loop0
ccreate_dev: 311: set +x
test_activate_dev: 388: local disk=/dev/loop0
ttest_activate_dev: 389: create_dev vdg.disk
ccreate_dev: 302: local name=vdg.disk
ccreate_dev: 304: set -x
ccreate_dev: 305: echo create_dev vdg.disk
create_dev vdg.disk
ccreate_dev: 306: dd if=/dev/zero of=vdg.disk bs=1024k count=400
400+0 records in
400+0 records out
419430400 bytes (419 MB) copied, 0,306832 s, 1,4 GB/s
ccreate_dev: 307: losetup --find vdg.disk
cccreate_dev: 308: losetup --associated vdg.disk
cccreate_dev: 308: cut -f1 -d:
ccreate_dev: 308: local dev=/dev/loop1
ccreate_dev: 309: ceph-disk zap /dev/loop1
ccreate_dev: 310: echo /dev/loop1
ccreate_dev: 311: set +x
test_activate_dev: 389: local journal=/dev/loop1
ttest_activate_dev: 390: create_dev vdh.disk
ccreate_dev: 302: local name=vdh.disk
ccreate_dev: 304: set -x
ccreate_dev: 305: echo create_dev vdh.disk
create_dev vdh.disk
ccreate_dev: 306: dd if=/dev/zero of=vdh.disk bs=1024k count=400
400+0 records in
400+0 records out
419430400 bytes (419 MB) copied, 0,296205 s, 1,4 GB/s
ccreate_dev: 307: losetup --find vdh.disk
cccreate_dev: 308: losetup --associated vdh.disk
cccreate_dev: 308: cut -f1 -d:
ccreate_dev: 308: local dev=/dev/loop2
ccreate_dev: 309: ceph-disk zap /dev/loop2
ccreate_dev: 310: echo /dev/loop2
ccreate_dev: 311: set +x
test_activate_dev: 390: local newdisk=/dev/loop2
test_activate_dev: 392: activate_dev_body /dev/loop0 /dev/loop1 /dev/loop2
activate_dev_body: 330: local disk=/dev/loop0
activate_dev_body: 331: local journal=/dev/loop1
activate_dev_body: 332: local newdisk=/dev/loop2
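The create_dev helper traced three times above comes down to this sketch, reconstructed from the trace. The first echo is evidently sent somewhere other than stdout, since the caller captures stdout as the device name; the tracing toggles (set -x/+x) are omitted.

    create_dev() {
        local name=$1
        echo create_dev $name >&2    # assumed redirection; the trace cannot show it
        # 400 MB of zeros, attached to the first free loop device
        dd if=/dev/zero of=$name bs=1024k count=400
        losetup --find $name
        local dev=$(losetup --associated $name | cut -f1 -d:)
        ceph-disk zap $dev
        echo $dev                    # captured by: disk=$(create_dev vdf.disk)
    }
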
activate_dev_body: 334: setup
setup: 57: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
setup: 58: mkdir test-ceph-disk
setup: 59: mkdir test-ceph-disk/osd
setup: 61: touch test-ceph-disk/ceph.conf
activate_dev_body: 335: run_mon
run_mon: 74: local mon_dir=test-ceph-disk/a
run_mon: 77: ./ceph-mon --id a --mkfs --mon-data=test-ceph-disk/a --mon-initial-members=a
./ceph-mon: mon.noname-a 127.0.0.1:7451/0 is local, renaming to mon.a
./ceph-mon: set fsid to a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
./ceph-mon: created monfs at test-ceph-disk/a for mon.a
run_mon: 84: ./ceph-mon --id a --mon-data=test-ceph-disk/a --mon-osd-full-ratio=.99 --mon-data-avail-crit=1 --mon-cluster-log-file=test-ceph-disk/a/log --public-addr 127.0.0.1:7451
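run_mon, just traced, is essentially the two ceph-mon calls below; $CEPH_ARGS supplies the fsid, mon host, log file and pid file. A sketch assembled from the trace, not from the script itself:

    run_mon() {
        local mon_dir=$DIR/$MON_ID
        # create the monitor store, then start a single mon on 127.0.0.1:7451
        ./ceph-mon --id $MON_ID --mkfs --mon-data=$mon_dir --mon-initial-members=$MON_ID
        ./ceph-mon --id $MON_ID \
            --mon-data=$mon_dir \
            --mon-osd-full-ratio=.99 \
            --mon-data-avail-crit=1 \
            --mon-cluster-log-file=$mon_dir/log \
            --public-addr $MONA
    }
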
activate_dev_body: 339: echo -----ONE
-----ONE
activate_dev_body: 340: test_activate /dev/loop0 /dev/loop0p1 /dev/loop1
test_activate: 203: local to_prepare=/dev/loop0
test_activate: 204: local to_activate=/dev/loop0p1
test_activate: 205: local journal=/dev/loop1
ttest_activate: 206: uuidgen
test_activate: 206: local osd_uuid=103c4552-94fe-4b9d-839d-fbb43a04ed81
test_activate: 208: /bin/mkdir -p test-ceph-disk/osd
test_activate: 210: ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose prepare --osd-uuid 103c4552-94fe-4b9d-839d-fbb43a04ed81 /dev/loop0 /dev/loop1
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=osd_journal_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
INFO:ceph-disk:Running command: /sbin/parted --machine -- /dev/loop1 print
WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data
DEBUG:ceph-disk:Creating journal partition num 1 size 100 on /dev/loop1
INFO:ceph-disk:Running command: /sbin/sgdisk --new=1:0:+100M --change-name=1:ceph journal --partition-guid=1:e69d723e-94ed-4819-8cb4-95b66d22db0e --typecode=1:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/loop1
Information: Moved requested sector from 34 to 2048 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop1
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop1
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/e69d723e-94ed-4819-8cb4-95b66d22db0e
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/e69d723e-94ed-4819-8cb4-95b66d22db0e
DEBUG:ceph-disk:Creating osd partition on /dev/loop0
INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:103c4552-94fe-4b9d-839d-fbb43a04ed81 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/loop0
Information: Moved requested sector from 34 to 2048 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on created device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Creating xfs fs on /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/loop0p1
meta-data=/dev/loop0p1           isize=2048   agcount=4, agsize=25535 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0
data     =                       bsize=4096   blocks=102139, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=864, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.tfBQD5 with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.tfBQD5
DEBUG:ceph-disk:Preparing osd data dir test-ceph-disk/tmp/mnt.tfBQD5
DEBUG:ceph-disk:Creating symlink test-ceph-disk/tmp/mnt.tfBQD5/journal -> /dev/disk/by-partuuid/e69d723e-94ed-4819-8cb4-95b66d22db0e
DEBUG:ceph-disk:Unmounting test-ceph-disk/tmp/mnt.tfBQD5
INFO:ceph-disk:Running command: /bin/umount -- test-ceph-disk/tmp/mnt.tfBQD5
INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/loop0
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
partx: /dev/loop0: error adding partition 1
test_activate: 213: /bin/timeout 360 ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose activate --mark-init=none /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/loop0p1
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.BKb8Yz with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.BKb8Yz
DEBUG:ceph-disk:Cluster uuid is a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
DEBUG:ceph-disk:Cluster name is ceph
DEBUG:ceph-disk:OSD uuid is 103c4552-94fe-4b9d-839d-fbb43a04ed81
DEBUG:ceph-disk:Allocating OSD id...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring osd create --concise 103c4552-94fe-4b9d-839d-fbb43a04ed81
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
DEBUG:ceph-disk:OSD id is 0
DEBUG:ceph-disk:Initializing OSD...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring mon getmap -o test-ceph-disk/tmp/mnt.BKb8Yz/activate.monmap
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
got monmap epoch 1
INFO:ceph-disk:Running command: ceph-osd --cluster ceph --mkfs --mkkey -i 0 --monmap test-ceph-disk/tmp/mnt.BKb8Yz/activate.monmap --osd-data test-ceph-disk/tmp/mnt.BKb8Yz --osd-journal test-ceph-disk/tmp/mnt.BKb8Yz/journal --osd-uuid 103c4552-94fe-4b9d-839d-fbb43a04ed81 --keyring test-ceph-disk/tmp/mnt.BKb8Yz/keyring
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:28.451160 7f35a15e57c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected 103c4552-94fe-4b9d-839d-fbb43a04ed81, invalid (someone else's?) journal
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:28.515630 7f35a15e57c0 -1 filestore(test-ceph-disk/tmp/mnt.BKb8Yz) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-07 12:21:28.549414 7f35a15e57c0 -1 created object store test-ceph-disk/tmp/mnt.BKb8Yz journal test-ceph-disk/tmp/mnt.BKb8Yz/journal for osd.0 fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
2015-03-07 12:21:28.549514 7f35a15e57c0 -1 auth: error reading file: test-ceph-disk/tmp/mnt.BKb8Yz/keyring: can't open test-ceph-disk/tmp/mnt.BKb8Yz/keyring: (2) No such file or directory
2015-03-07 12:21:28.549773 7f35a15e57c0 -1 created new key in keyring test-ceph-disk/tmp/mnt.BKb8Yz/keyring
DEBUG:ceph-disk:Authorizing OSD key...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring auth add osd.0 -i test-ceph-disk/tmp/mnt.BKb8Yz/keyring osd allow * mon allow profile osd
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
added key for osd.0
DEBUG:ceph-disk:ceph osd.0 data dir is ready at test-ceph-disk/tmp/mnt.BKb8Yz
DEBUG:ceph-disk:Moving mount to final location...
INFO:ceph-disk:Running command: /bin/mount -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/osd/ceph-0
INFO:ceph-disk:Running command: /bin/umount -l -- test-ceph-disk/tmp/mnt.BKb8Yz
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --id=0 --osd-data=test-ceph-disk/osd/ceph-0 --osd-journal=test-ceph-disk/osd/ceph-0/journal
starting osd.0 at :/0 osd_data test-ceph-disk/osd/ceph-0 test-ceph-disk/osd/ceph-0/journal
test_activate: 217: /bin/timeout 360 ./ceph osd pool set rbd size 1
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
set pool 0 size to 1
ttest_activate: 219: ceph osd create 103c4552-94fe-4b9d-839d-fbb43a04ed81
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
test_activate: 219: local id=0
test_activate: 220: local weight=1
test_activate: 221: ./ceph osd crush add osd.0 1 root=default host=localhost
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
add item id 0 name 'osd.0' weight 1 at location {host=localhost,root=default} to crush map
test_activate: 222: echo FOO
test_activate: 223: /bin/timeout 360 ./rados --pool rbd put BAR test-ceph-disk/BAR
test_activate: 224: /bin/timeout 360 ./rados --pool rbd get BAR test-ceph-disk/BAR.copy
test_activate: 225: /bin/diff test-ceph-disk/BAR test-ceph-disk/BAR.copy
activate_dev_body: 341: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/mon.a.pidfile
kill_daemons: 95: pid=8027
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8027
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8027
test/ceph-disk.sh: line 97: kill: (8027) - No such process
kill_daemons: 97: break
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=8250
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8250
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8250
kill_daemons: 98: sleep 1
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8250
test/ceph-disk.sh: line 97: kill: (8250) - No such process
kill_daemons: 97: break
activate_dev_body: 342: umount /dev/loop0p1
activate_dev_body: 343: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=8250
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8250
test/ceph-disk.sh: line 97: kill: (8250) - No such process
kill_daemons: 97: break
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
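Sections ONE through FOUR all run the same test_activate body with different device arguments. From the trace it is approximately the following; the redirection on the echo is an assumption, since set -x does not print redirections:

    test_activate() {
        local to_prepare=$1
        local to_activate=$2
        local journal=$3
        local osd_uuid=$(uuidgen)

        $mkdir -p $OSD_DATA

        ./ceph-disk $CEPH_DISK_ARGS prepare --osd-uuid $osd_uuid $to_prepare $journal
        $timeout $TIMEOUT ./ceph-disk $CEPH_DISK_ARGS activate --mark-init=none $to_activate

        # single-OSD cluster: a pool size of 1 lets writes complete
        $timeout $TIMEOUT ./ceph osd pool set $TEST_POOL size 1

        local id=$(ceph osd create $osd_uuid)
        local weight=1
        ./ceph osd crush add osd.$id $weight root=default host=localhost

        # round-trip one object through the new OSD and compare
        echo FOO > $DIR/BAR    # assumed target; the trace only shows "echo FOO"
        $timeout $TIMEOUT ./rados --pool $TEST_POOL put BAR $DIR/BAR
        $timeout $TIMEOUT ./rados --pool $TEST_POOL get BAR $DIR/BAR.copy
        $diff $DIR/BAR $DIR/BAR.copy
    }
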
activate_dev_body: 345: echo -----TWO
-----TWO
activate_dev_body: 346: setup
setup: 57: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
find: ‘test-ceph-disk’: No such file or directory
kkill_daemons: 93: grep pidfile
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
setup: 58: mkdir test-ceph-disk
setup: 59: mkdir test-ceph-disk/osd
setup: 61: touch test-ceph-disk/ceph.conf
activate_dev_body: 347: run_mon
run_mon: 74: local mon_dir=test-ceph-disk/a
run_mon: 77: ./ceph-mon --id a --mkfs --mon-data=test-ceph-disk/a --mon-initial-members=a
./ceph-mon: mon.noname-a 127.0.0.1:7451/0 is local, renaming to mon.a
./ceph-mon: set fsid to a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
./ceph-mon: created monfs at test-ceph-disk/a for mon.a
run_mon: 84: ./ceph-mon --id a --mon-data=test-ceph-disk/a --mon-osd-full-ratio=.99 --mon-data-avail-crit=1 --mon-cluster-log-file=test-ceph-disk/a/log --public-addr 127.0.0.1:7451
activate_dev_body: 353: ceph-disk zap /dev/loop0
Caution: invalid backup GPT header, but valid main header; regenerating backup header from main header.
Warning! Main and backup partition tables differ! Use the 'c' and 'e' options on the recovery & transformation menu to examine the two tables.
Warning! One or more CRCs don't match. You should repair the disk!
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk verification and recovery are STRONGLY recommended.
****************************************************************************
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
GPT data structures destroyed! You may now partition the disk using fdisk or other utilities.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
activate_dev_body: 354: test_activate /dev/loop0 /dev/loop0p1 /dev/loop1
test_activate: 203: local to_prepare=/dev/loop0
test_activate: 204: local to_activate=/dev/loop0p1
test_activate: 205: local journal=/dev/loop1
ttest_activate: 206: uuidgen
test_activate: 206: local osd_uuid=40291dd0-38c7-4263-8506-9eee5edaba3f
test_activate: 208: /bin/mkdir -p test-ceph-disk/osd
test_activate: 210: ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose prepare --osd-uuid 40291dd0-38c7-4263-8506-9eee5edaba3f /dev/loop0 /dev/loop1
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=osd_journal_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
INFO:ceph-disk:Running command: /sbin/parted --machine -- /dev/loop1 print
WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data
DEBUG:ceph-disk:Creating journal partition num 2 size 100 on /dev/loop1
INFO:ceph-disk:Running command: /sbin/sgdisk --new=2:0:+100M --change-name=2:ceph journal --partition-guid=2:7ee0c3bb-4d39-459e-aeb6-113235d56607 --typecode=2:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/loop1
Information: Moved requested sector from 204834 to 206848 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop1
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop1
partx: /dev/loop1: error adding partition 1
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/7ee0c3bb-4d39-459e-aeb6-113235d56607
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/7ee0c3bb-4d39-459e-aeb6-113235d56607
DEBUG:ceph-disk:Creating osd partition on /dev/loop0
INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:40291dd0-38c7-4263-8506-9eee5edaba3f --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/loop0
Information: Moved requested sector from 34 to 2048 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on created device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Creating xfs fs on /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/loop0p1
meta-data=/dev/loop0p1           isize=2048   agcount=4, agsize=25535 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0
data     =                       bsize=4096   blocks=102139, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=864, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.A6nTfG with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.A6nTfG
DEBUG:ceph-disk:Preparing osd data dir test-ceph-disk/tmp/mnt.A6nTfG
DEBUG:ceph-disk:Creating symlink test-ceph-disk/tmp/mnt.A6nTfG/journal -> /dev/disk/by-partuuid/7ee0c3bb-4d39-459e-aeb6-113235d56607
DEBUG:ceph-disk:Unmounting test-ceph-disk/tmp/mnt.A6nTfG
INFO:ceph-disk:Running command: /bin/umount -- test-ceph-disk/tmp/mnt.A6nTfG
INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/loop0
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
partx: /dev/loop0: error adding partition 1
test_activate: 213: /bin/timeout 360 ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose activate --mark-init=none /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/loop0p1
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.75xace with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.75xace
DEBUG:ceph-disk:Cluster uuid is a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
DEBUG:ceph-disk:Cluster name is ceph
DEBUG:ceph-disk:OSD uuid is 40291dd0-38c7-4263-8506-9eee5edaba3f
DEBUG:ceph-disk:Allocating OSD id...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring osd create --concise 40291dd0-38c7-4263-8506-9eee5edaba3f
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
DEBUG:ceph-disk:OSD id is 0
DEBUG:ceph-disk:Initializing OSD...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring mon getmap -o test-ceph-disk/tmp/mnt.75xace/activate.monmap
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
got monmap epoch 1
INFO:ceph-disk:Running command: ceph-osd --cluster ceph --mkfs --mkkey -i 0 --monmap test-ceph-disk/tmp/mnt.75xace/activate.monmap --osd-data test-ceph-disk/tmp/mnt.75xace --osd-journal test-ceph-disk/tmp/mnt.75xace/journal --osd-uuid 40291dd0-38c7-4263-8506-9eee5edaba3f --keyring test-ceph-disk/tmp/mnt.75xace/keyring
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:40.141800 7f49d66377c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected 40291dd0-38c7-4263-8506-9eee5edaba3f, invalid (someone else's?) journal
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:40.202818 7f49d66377c0 -1 filestore(test-ceph-disk/tmp/mnt.75xace) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-07 12:21:40.228872 7f49d66377c0 -1 created object store test-ceph-disk/tmp/mnt.75xace journal test-ceph-disk/tmp/mnt.75xace/journal for osd.0 fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
2015-03-07 12:21:40.228963 7f49d66377c0 -1 auth: error reading file: test-ceph-disk/tmp/mnt.75xace/keyring: can't open test-ceph-disk/tmp/mnt.75xace/keyring: (2) No such file or directory
2015-03-07 12:21:40.229146 7f49d66377c0 -1 created new key in keyring test-ceph-disk/tmp/mnt.75xace/keyring
DEBUG:ceph-disk:Authorizing OSD key...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring auth add osd.0 -i test-ceph-disk/tmp/mnt.75xace/keyring osd allow * mon allow profile osd
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
added key for osd.0
DEBUG:ceph-disk:ceph osd.0 data dir is ready at test-ceph-disk/tmp/mnt.75xace
DEBUG:ceph-disk:Moving mount to final location...
INFO:ceph-disk:Running command: /bin/mount -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/osd/ceph-0
INFO:ceph-disk:Running command: /bin/umount -l -- test-ceph-disk/tmp/mnt.75xace
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --id=0 --osd-data=test-ceph-disk/osd/ceph-0 --osd-journal=test-ceph-disk/osd/ceph-0/journal
starting osd.0 at :/0 osd_data test-ceph-disk/osd/ceph-0 test-ceph-disk/osd/ceph-0/journal
test_activate: 217: /bin/timeout 360 ./ceph osd pool set rbd size 1
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
set pool 0 size to 1
ttest_activate: 219: ceph osd create 40291dd0-38c7-4263-8506-9eee5edaba3f
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
test_activate: 219: local id=0
test_activate: 220: local weight=1
test_activate: 221: ./ceph osd crush add osd.0 1 root=default host=localhost
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
add item id 0 name 'osd.0' weight 1 at location {host=localhost,root=default} to crush map
test_activate: 222: echo FOO
test_activate: 223: /bin/timeout 360 ./rados --pool rbd put BAR test-ceph-disk/BAR
test_activate: 224: /bin/timeout 360 ./rados --pool rbd get BAR test-ceph-disk/BAR.copy
test_activate: 225: /bin/diff test-ceph-disk/BAR test-ceph-disk/BAR.copy
activate_dev_body: 355: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/mon.a.pidfile
kill_daemons: 95: pid=8606
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8606
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8606
test/ceph-disk.sh: line 97: kill: (8606) - No such process
kill_daemons: 97: break
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=8837
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8837
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8837
kill_daemons: 98: sleep 1
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8837
test/ceph-disk.sh: line 97: kill: (8837) - No such process
kill_daemons: 97: break
activate_dev_body: 356: umount /dev/loop0p1
activate_dev_body: 357: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=8837
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 8837
test/ceph-disk.sh: line 97: kill: (8837) - No such process
kill_daemons: 97: break
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
activate_dev_body: 359: echo -----THREE
-----THREE
activate_dev_body: 360: setup
setup: 57: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
find: ‘test-ceph-disk’: No such file or directory
kkill_daemons: 93: grep pidfile
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
setup: 58: mkdir test-ceph-disk
setup: 59: mkdir test-ceph-disk/osd
setup: 61: touch test-ceph-disk/ceph.conf
activate_dev_body: 361: run_mon
run_mon: 74: local mon_dir=test-ceph-disk/a
run_mon: 77: ./ceph-mon --id a --mkfs --mon-data=test-ceph-disk/a --mon-initial-members=a
./ceph-mon: mon.noname-a 127.0.0.1:7451/0 is local, renaming to mon.a
./ceph-mon: set fsid to a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
./ceph-mon: created monfs at test-ceph-disk/a for mon.a
run_mon: 84: ./ceph-mon --id a --mon-data=test-ceph-disk/a --mon-osd-full-ratio=.99 --mon-data-avail-crit=1 --mon-cluster-log-file=test-ceph-disk/a/log --public-addr 127.0.0.1:7451
activate_dev_body: 365: test_activate /dev/loop2 /dev/loop2p1 /dev/loop1p1
test_activate: 203: local to_prepare=/dev/loop2
test_activate: 204: local to_activate=/dev/loop2p1
test_activate: 205: local journal=/dev/loop1p1
ttest_activate: 206: uuidgen
test_activate: 206: local osd_uuid=bc251ad7-e79d-4350-ac5c-d15c2ead9b6d
test_activate: 208: /bin/mkdir -p test-ceph-disk/osd
test_activate: 210: ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose prepare --osd-uuid bc251ad7-e79d-4350-ac5c-d15c2ead9b6d /dev/loop2 /dev/loop1p1
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=osd_journal_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
DEBUG:ceph-disk:Journal /dev/loop1p1 is a partition
WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data
INFO:ceph-disk:Running command: /sbin/blkid -p -o udev /dev/loop1p1
INFO:ceph-disk:Running command: /sbin/blkid -p -o udev /dev/loop1
WARNING:ceph-disk:Old blkid does not support ID_PART_ENTRY_* fields, trying sgdisk; may not correctly identify ceph volumes with dmcrypt
INFO:ceph-disk:Running command: /sbin/sgdisk -p /dev/loop
Problem opening /dev/loop for reading! Error is 2.
The specified file does not exist!
WARNING:ceph-disk:Journal /dev/loop1p1 was not prepared with ceph-disk. Symlinking directly.
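Note the probe sequence that led to this warning: blkid is asked about /dev/loop1p1 and then /dev/loop1, but this RHEL 7.0 blkid does not report the ID_PART_ENTRY_* fields, so ceph-disk falls back to sgdisk, and the sgdisk fallback computes the parent device as /dev/loop rather than /dev/loop1. It looks as if the partition number is stripped off starting at the first digit, which works for /dev/sdb1 -> /dev/sdb but not for loop partitions. A hypothetical one-liner reproducing the bad derivation, not the actual ceph-disk code:

    part=/dev/loop1p1
    echo ${part%%[0-9]*}    # strips the suffix from the first digit: /dev/loop

With the probe failing (Error is 2), the journal partition is treated as foreign and prepare symlinks to /dev/loop1p1 directly instead of to a stable /dev/disk/by-partuuid path.
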
DEBUG:ceph-disk:Creating osd partition on /dev/loop2
INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:bc251ad7-e79d-4350-ac5c-d15c2ead9b6d --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/loop2
Information: Moved requested sector from 34 to 2048 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on created device /dev/loop2
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop2
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Creating xfs fs on /dev/loop2p1
INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/loop2p1
meta-data=/dev/loop2p1           isize=2048   agcount=4, agsize=25535 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0
data     =                       bsize=4096   blocks=102139, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=864, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
DEBUG:ceph-disk:Mounting /dev/loop2p1 on test-ceph-disk/tmp/mnt.yYP7iv with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop2p1 test-ceph-disk/tmp/mnt.yYP7iv
DEBUG:ceph-disk:Preparing osd data dir test-ceph-disk/tmp/mnt.yYP7iv
DEBUG:ceph-disk:Creating symlink test-ceph-disk/tmp/mnt.yYP7iv/journal -> /dev/loop1p1
DEBUG:ceph-disk:Unmounting test-ceph-disk/tmp/mnt.yYP7iv
INFO:ceph-disk:Running command: /bin/umount -- test-ceph-disk/tmp/mnt.yYP7iv
INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/loop2
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop2
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop2
partx: /dev/loop2: error adding partition 1
test_activate: 213: /bin/timeout 360 ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose activate --mark-init=none /dev/loop2p1
INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/loop2p1
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
DEBUG:ceph-disk:Mounting /dev/loop2p1 on test-ceph-disk/tmp/mnt.Xi6lik with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop2p1 test-ceph-disk/tmp/mnt.Xi6lik
DEBUG:ceph-disk:Cluster uuid is a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
DEBUG:ceph-disk:Cluster name is ceph
DEBUG:ceph-disk:OSD uuid is bc251ad7-e79d-4350-ac5c-d15c2ead9b6d
DEBUG:ceph-disk:Allocating OSD id...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring osd create --concise bc251ad7-e79d-4350-ac5c-d15c2ead9b6d
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
DEBUG:ceph-disk:OSD id is 0
DEBUG:ceph-disk:Initializing OSD...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring mon getmap -o test-ceph-disk/tmp/mnt.Xi6lik/activate.monmap
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
got monmap epoch 1
INFO:ceph-disk:Running command: ceph-osd --cluster ceph --mkfs --mkkey -i 0 --monmap test-ceph-disk/tmp/mnt.Xi6lik/activate.monmap --osd-data test-ceph-disk/tmp/mnt.Xi6lik --osd-journal test-ceph-disk/tmp/mnt.Xi6lik/journal --osd-uuid bc251ad7-e79d-4350-ac5c-d15c2ead9b6d --keyring test-ceph-disk/tmp/mnt.Xi6lik/keyring
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:48.564801 7f383e3b57c0 -1 journal check: ondisk fsid 103c4552-94fe-4b9d-839d-fbb43a04ed81 doesn't match expected bc251ad7-e79d-4350-ac5c-d15c2ead9b6d, invalid (someone else's?) journal
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:48.607004 7f383e3b57c0 -1 filestore(test-ceph-disk/tmp/mnt.Xi6lik) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-07 12:21:48.631104 7f383e3b57c0 -1 created object store test-ceph-disk/tmp/mnt.Xi6lik journal test-ceph-disk/tmp/mnt.Xi6lik/journal for osd.0 fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
2015-03-07 12:21:48.631170 7f383e3b57c0 -1 auth: error reading file: test-ceph-disk/tmp/mnt.Xi6lik/keyring: can't open test-ceph-disk/tmp/mnt.Xi6lik/keyring: (2) No such file or directory
2015-03-07 12:21:48.631268 7f383e3b57c0 -1 created new key in keyring test-ceph-disk/tmp/mnt.Xi6lik/keyring
DEBUG:ceph-disk:Authorizing OSD key...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring auth add osd.0 -i test-ceph-disk/tmp/mnt.Xi6lik/keyring osd allow * mon allow profile osd
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
added key for osd.0
DEBUG:ceph-disk:ceph osd.0 data dir is ready at test-ceph-disk/tmp/mnt.Xi6lik
DEBUG:ceph-disk:Moving mount to final location...
INFO:ceph-disk:Running command: /bin/mount -o noatime,inode64 -- /dev/loop2p1 test-ceph-disk/osd/ceph-0
INFO:ceph-disk:Running command: /bin/umount -l -- test-ceph-disk/tmp/mnt.Xi6lik
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --id=0 --osd-data=test-ceph-disk/osd/ceph-0 --osd-journal=test-ceph-disk/osd/ceph-0/journal
starting osd.0 at :/0 osd_data test-ceph-disk/osd/ceph-0 test-ceph-disk/osd/ceph-0/journal
test_activate: 217: /bin/timeout 360 ./ceph osd pool set rbd size 1
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
set pool 0 size to 1
ttest_activate: 219: ceph osd create bc251ad7-e79d-4350-ac5c-d15c2ead9b6d
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
test_activate: 219: local id=0
test_activate: 220: local weight=1
test_activate: 221: ./ceph osd crush add osd.0 1 root=default host=localhost
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
add item id 0 name 'osd.0' weight 1 at location {host=localhost,root=default} to crush map
test_activate: 222: echo FOO
test_activate: 223: /bin/timeout 360 ./rados --pool rbd put BAR test-ceph-disk/BAR
test_activate: 224: /bin/timeout 360 ./rados --pool rbd get BAR test-ceph-disk/BAR.copy
test_activate: 225: /bin/diff test-ceph-disk/BAR test-ceph-disk/BAR.copy
activate_dev_body: 373: echo -----FOUR
-----FOUR
activate_dev_body: 374: ceph-disk zap /dev/loop0
Caution: invalid backup GPT header, but valid main header; regenerating backup header from main header.
Warning! Main and backup partition tables differ! Use the 'c' and 'e' options on the recovery & transformation menu to examine the two tables.
Warning! One or more CRCs don't match. You should repair the disk!
****************************************************************************
Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk verification and recovery are STRONGLY recommended.
****************************************************************************
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
GPT data structures destroyed! You may now partition the disk using fdisk or other utilities.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
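The Caution/Warning block above is gdisk complaining about the stale GPT left on /dev/loop0 by section ONE before destroying it. ceph-disk zap here amounts to roughly the following sgdisk invocation; the exact flags are an assumption, since the trace only shows zap's output:

    sgdisk --zap-all --clear --mbrtogpt -- /dev/loop0
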
activate_dev_body: 375: test_activate /dev/loop0 /dev/loop0p1 /dev/loop1
test_activate: 203: local to_prepare=/dev/loop0
test_activate: 204: local to_activate=/dev/loop0p1
test_activate: 205: local journal=/dev/loop1
ttest_activate: 206: uuidgen
test_activate: 206: local osd_uuid=3b2db639-b1fe-4ca2-883c-07171e152561
test_activate: 208: /bin/mkdir -p test-ceph-disk/osd
test_activate: 210: ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose prepare --osd-uuid 3b2db639-b1fe-4ca2-883c-07171e152561 /dev/loop0 /dev/loop1
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_type
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mkfs_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=osd_journal_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_cryptsetup_parameters
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_key_size
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_dmcrypt_type
INFO:ceph-disk:Running command: /sbin/parted --machine -- /dev/loop1 print
WARNING:ceph-disk:OSD will not be hot-swappable if journal is not the same device as the osd data
DEBUG:ceph-disk:Creating journal partition num 3 size 100 on /dev/loop1
INFO:ceph-disk:Running command: /sbin/sgdisk --new=3:0:+100M --change-name=3:ceph journal --partition-guid=3:56799c94-d419-4363-9935-785d47e31010 --typecode=3:45b0969e-9b03-4f30-b4c6-b4b80ceff106 --mbrtogpt -- /dev/loop1
Information: Moved requested sector from 409634 to 411648 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop1
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop1
partx: /dev/loop1: error adding partitions 1-2
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/56799c94-d419-4363-9935-785d47e31010
DEBUG:ceph-disk:Journal is GPT partition /dev/disk/by-partuuid/56799c94-d419-4363-9935-785d47e31010
DEBUG:ceph-disk:Creating osd partition on /dev/loop0
INFO:ceph-disk:Running command: /sbin/sgdisk --largest-new=1 --change-name=1:ceph data --partition-guid=1:3b2db639-b1fe-4ca2-883c-07171e152561 --typecode=1:89c57f98-2fe5-4dc0-89c1-f3ad0ceff2be -- /dev/loop0
Information: Moved requested sector from 34 to 2048 in order to align on 2048-sector boundaries.
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on created device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
INFO:ceph-disk:Running command: /sbin/udevadm settle
DEBUG:ceph-disk:Creating xfs fs on /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/mkfs -t xfs -f -i size=2048 -- /dev/loop0p1
meta-data=/dev/loop0p1           isize=2048   agcount=4, agsize=25535 blks
         =                       sectsz=512   attr=2, projid32bit=1
         =                       crc=0
data     =                       bsize=4096   blocks=102139, imaxpct=25
         =                       sunit=0      swidth=0 blks
naming   =version 2              bsize=4096   ascii-ci=0 ftype=0
log      =internal log           bsize=4096   blocks=864, version=2
         =                       sectsz=512   sunit=0 blks, lazy-count=1
realtime =none                   extsz=4096   blocks=0, rtextents=0
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.7mFKs1 with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.7mFKs1
DEBUG:ceph-disk:Preparing osd data dir test-ceph-disk/tmp/mnt.7mFKs1
DEBUG:ceph-disk:Creating symlink test-ceph-disk/tmp/mnt.7mFKs1/journal -> /dev/disk/by-partuuid/56799c94-d419-4363-9935-785d47e31010
DEBUG:ceph-disk:Unmounting test-ceph-disk/tmp/mnt.7mFKs1
INFO:ceph-disk:Running command: /bin/umount -- test-ceph-disk/tmp/mnt.7mFKs1
INFO:ceph-disk:Running command: /sbin/sgdisk --typecode=1:4fbd7e29-9d25-41b8-afd0-062c0ceff05d -- /dev/loop0
Warning: The kernel is still using the old partition table.
The new table will be used at the next reboot.
The operation has completed successfully.
INFO:ceph-disk:calling partx on prepared device /dev/loop0
INFO:ceph-disk:re-reading known partitions will display errors
INFO:ceph-disk:Running command: /sbin/partx -a /dev/loop0
partx: /dev/loop0: error adding partition 1
test_activate: 213: /bin/timeout 360 ./ceph-disk --statedir=test-ceph-disk --sysconfdir=test-ceph-disk --prepend-to-path= --verbose activate --mark-init=none /dev/loop0p1
INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/loop0p1
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
INFO:ceph-disk:Running command: ceph-conf --cluster=ceph --name=osd. --lookup osd_fs_mount_options_xfs
DEBUG:ceph-disk:Mounting /dev/loop0p1 on test-ceph-disk/tmp/mnt.Oey6Xy with options noatime,inode64
INFO:ceph-disk:Running command: mount -t xfs -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/tmp/mnt.Oey6Xy
DEBUG:ceph-disk:Cluster uuid is a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --show-config-value=fsid
DEBUG:ceph-disk:Cluster name is ceph
DEBUG:ceph-disk:OSD uuid is 3b2db639-b1fe-4ca2-883c-07171e152561
DEBUG:ceph-disk:Allocating OSD id...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring osd create --concise 3b2db639-b1fe-4ca2-883c-07171e152561
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
DEBUG:ceph-disk:OSD id is 1
DEBUG:ceph-disk:Initializing OSD...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring mon getmap -o test-ceph-disk/tmp/mnt.Oey6Xy/activate.monmap
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
got monmap epoch 1
INFO:ceph-disk:Running command: ceph-osd --cluster ceph --mkfs --mkkey -i 1 --monmap test-ceph-disk/tmp/mnt.Oey6Xy/activate.monmap --osd-data test-ceph-disk/tmp/mnt.Oey6Xy --osd-journal test-ceph-disk/tmp/mnt.Oey6Xy/journal --osd-uuid 3b2db639-b1fe-4ca2-883c-07171e152561 --keyring test-ceph-disk/tmp/mnt.Oey6Xy/keyring
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:59.175534 7f77d1ee87c0 -1 journal check: ondisk fsid 00000000-0000-0000-0000-000000000000 doesn't match expected 3b2db639-b1fe-4ca2-883c-07171e152561, invalid (someone else's?) journal
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
HDIO_DRIVE_CMD(identify) failed: Invalid argument
2015-03-07 12:21:59.217549 7f77d1ee87c0 -1 filestore(test-ceph-disk/tmp/mnt.Oey6Xy) could not find 23c2fcde/osd_superblock/0//-1 in index: (2) No such file or directory
2015-03-07 12:21:59.248109 7f77d1ee87c0 -1 created object store test-ceph-disk/tmp/mnt.Oey6Xy journal test-ceph-disk/tmp/mnt.Oey6Xy/journal for osd.1 fsid a3034ebb-8bc0-444f-a5f8-06bf6ed1d2a5
2015-03-07 12:21:59.248176 7f77d1ee87c0 -1 auth: error reading file: test-ceph-disk/tmp/mnt.Oey6Xy/keyring: can't open test-ceph-disk/tmp/mnt.Oey6Xy/keyring: (2) No such file or directory
2015-03-07 12:21:59.248357 7f77d1ee87c0 -1 created new key in keyring test-ceph-disk/tmp/mnt.Oey6Xy/keyring
DEBUG:ceph-disk:Authorizing OSD key...
INFO:ceph-disk:Running command: ceph --cluster ceph --name client.bootstrap-osd --keyring test-ceph-disk/bootstrap-osd/ceph.keyring auth add osd.1 -i test-ceph-disk/tmp/mnt.Oey6Xy/keyring osd allow * mon allow profile osd
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
added key for osd.1
DEBUG:ceph-disk:ceph osd.1 data dir is ready at test-ceph-disk/tmp/mnt.Oey6Xy
DEBUG:ceph-disk:Moving mount to final location...
INFO:ceph-disk:Running command: /bin/mount -o noatime,inode64 -- /dev/loop0p1 test-ceph-disk/osd/ceph-1
INFO:ceph-disk:Running command: /bin/umount -l -- test-ceph-disk/tmp/mnt.Oey6Xy
INFO:ceph-disk:Running command: ceph-osd --cluster=ceph --id=1 --osd-data=test-ceph-disk/osd/ceph-1 --osd-journal=test-ceph-disk/osd/ceph-1/journal
starting osd.1 at :/0 osd_data test-ceph-disk/osd/ceph-1 test-ceph-disk/osd/ceph-1/journal
test_activate: 217: /bin/timeout 360 ./ceph osd pool set rbd size 1
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
set pool 0 size to 1
ttest_activate: 219: ceph osd create 3b2db639-b1fe-4ca2-883c-07171e152561
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
test_activate: 219: local id=1
test_activate: 220: local weight=1
test_activate: 221: ./ceph osd crush add osd.1 1 root=default host=localhost
*** DEVELOPER MODE: setting PATH, PYTHONPATH and LD_LIBRARY_PATH ***
add item id 1 name 'osd.1' weight 1 at location {host=localhost,root=default} to crush map
test_activate: 222: echo FOO
test_activate: 223: /bin/timeout 360 ./rados --pool rbd put BAR test-ceph-disk/BAR
test_activate: 224: /bin/timeout 360 ./rados --pool rbd get BAR test-ceph-disk/BAR.copy
test_activate: 225: /bin/diff test-ceph-disk/BAR test-ceph-disk/BAR.copy
activate_dev_body: 376: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/mon.a.pidfile
kill_daemons: 95: pid=9193
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9193
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9193
test/ceph-disk.sh: line 97: kill: (9193) - No such process
kill_daemons: 97: break
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=9415
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9415
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9415
kill_daemons: 98: sleep 1
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9415
test/ceph-disk.sh: line 97: kill: (9415) - No such process
kill_daemons: 97: break
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.1.pidfile
kill_daemons: 95: pid=9964
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9964
kill_daemons: 98: sleep 0
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9964
kill_daemons: 98: sleep 1
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9964
test/ceph-disk.sh: line 97: kill: (9964) - No such process
kill_daemons: 97: break
activate_dev_body: 377: umount /dev/loop2p1
activate_dev_body: 378: umount /dev/loop0p1
activate_dev_body: 379: teardown
teardown: 65: kill_daemons
kkill_daemons: 93: find test-ceph-disk
kkill_daemons: 93: grep pidfile
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.0.pidfile
kill_daemons: 95: pid=9415
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9415
test/ceph-disk.sh: line 97: kill: (9415) - No such process
kill_daemons: 97: break
kill_daemons: 94: for pidfile in '$(find $DIR | grep pidfile)'
kkill_daemons: 95: cat test-ceph-disk/osd.1.pidfile
kill_daemons: 95: pid=9964
kill_daemons: 96: for try in 0 1 1 1 2 3
kill_daemons: 97: kill 9964
test/ceph-disk.sh: line 97: kill: (9964) - No such process
kill_daemons: 97: break
tteardown: 66: stat -f -c %T .
teardown: 66: '[' xfs == btrfs ']'
teardown: 70: rm -fr test-ceph-disk
test_activate_dev: 393: status=0
test_activate_dev: 394: test 0 '!=' 0
test_activate_dev: 396: destroy_dev vdf.disk /dev/loop0
destroy_dev: 315: local name=vdf.disk
destroy_dev: 316: local dev=/dev/loop0
destroy_dev: 318: set -x
destroy_dev: 319: echo destroy_dev vdf.disk /dev/loop0
destroy_dev vdf.disk /dev/loop0
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop0p1
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop0p2
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop0p3
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop0p4
destroy_dev: 321: true
destroy_dev: 323: ceph-disk zap /dev/loop0
destroy_dev: 324: losetup --detach /dev/loop0
destroy_dev: 325: rm vdf.disk
destroy_dev: 326: set +x
destroy_dev: 319: echo destroy_dev vdg.disk /dev/loop1
destroy_dev vdg.disk /dev/loop1
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop1p1
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop1p2
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop1p3
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop1p4
destroy_dev: 321: true
destroy_dev: 323: ceph-disk zap /dev/loop1
destroy_dev: 324: losetup --detach /dev/loop1
destroy_dev: 325: rm vdg.disk
destroy_dev: 326: set +x
destroy_dev: 319: echo destroy_dev vdh.disk /dev/loop2
destroy_dev vdh.disk /dev/loop2
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop2p1
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop2p2
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop2p3
destroy_dev: 321: true
destroy_dev: 320: for partition in 1 2 3 4
destroy_dev: 321: umount /dev/loop2p4
destroy_dev: 321: true
destroy_dev: 323: ceph-disk zap /dev/loop2
destroy_dev: 324: losetup --detach /dev/loop2
destroy_dev: 325: rm vdh.disk
destroy_dev: 326: set +x
find: ‘test-ceph-disk’: No such file or directory
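destroy_dev, traced above for all three backing files, is the inverse of create_dev. Reconstructed from the trace, with the '|| true' implied by the 'true' entries after each failed umount:

    destroy_dev() {
        local name=$1
        local dev=$2
        echo destroy_dev $name $dev
        # partitions may or may not be mounted; ignore umount failures
        for partition in 1 2 3 4 ; do
            umount ${dev}p$partition || true
        done
        ceph-disk zap $dev
        losetup --detach $dev
        rm $name
    }

The closing find error is harmless: it is one last kill_daemons run after teardown has already removed test-ceph-disk. The run ended with status=0 (test_activate_dev: 393), i.e. test_activate_dev passed.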