Bug #9307
"s3.test_multipart_upload_multiple_sizes ... ERROR" in upgrade:dumpling-firefly-x-master-distro-basic-vps run
0%
Description
2014-08-31T12:03:19.415 INFO:teuthology.orchestra.run.vpm173.stderr:s3tests.functional.test_s3.test_multipart_upload_multiple_sizes ... ERROR
archive_path: /var/lib/teuthworker/archive/teuthology-2014-08-31_08:49:23-upgrade:dumpling-firefly-x-master-distro-basic-vps/463427 branch: master description: upgrade:dumpling-firefly-x/parallel/{0-cluster/start.yaml 1-dumpling-install/dumpling.yaml 2-workload/{rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-firefly-upgrade/firefly.yaml 4-workload/{rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 5-upgrade-sequence/upgrade-by-daemon.yaml 6-final-workload/{ec-rados-default.yaml ec-rados-plugin=jerasure-k=3-m=1.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_s3tests.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml} email: ceph-qa@ceph.com job_id: '463427' kernel: &id001 kdb: true sha1: distro last_in_suite: false machine_type: vps name: teuthology-2014-08-31_08:49:23-upgrade:dumpling-firefly-x-master-distro-basic-vps nuke-on-error: true os_type: ubuntu os_version: '14.04' overrides: admin_socket: branch: master ceph: conf: global: osd heartbeat grace: 100 mon: debug mon: 20 debug ms: 1 debug paxos: 20 mon warn on legacy crush tunables: false osd: debug filestore: 20 debug journal: 20 debug ms: 1 debug osd: 20 log-whitelist: - slow request - scrub mismatch - ScrubResult sha1: fb79062fb4faa28a189fd9022cef62a5e12c0c26 ceph-deploy: branch: dev: master conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: debug mon: 1 debug ms: 20 debug paxos: 20 osd default pool size: 2 install: ceph: sha1: fb79062fb4faa28a189fd9022cef62a5e12c0c26 rgw: default_idle_timeout: 1200 s3tests: branch: master idle_timeout: 1200 workunit: sha1: fb79062fb4faa28a189fd9022cef62a5e12c0c26 owner: scheduled_teuthology@teuthology priority: 1000 roles: - - mon.a - mds.a - osd.0 - osd.1 - - mon.b - mon.c - osd.2 - osd.3 - - client.0 - client.1 suite: upgrade:dumpling-firefly-x suite_branch: master suite_path: /var/lib/teuthworker/src/ceph-qa-suite_master targets: 
ubuntu@vpm100.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDPcDOnmIZuk4H1jWtWBtcdIOAYAGJ0k+v0jjsgNGQq7NuxLUk05K4l7zjLdfTHwZeNiTOvXi7/sC8QnmHeI/AelWvBU6fmH0StEzA1VeAu8sLgS5H9ggb88H4P6jGvDVywikOFY1Q8oXIQNfMiQJlQsha+qx0XLcDNOq+gAma48u8269AwzIIwDCK0X0irdy0NZtoSLioJ6KUC8jiDoW7zx9Cagwark9q9zv6Rvp6YyfvCSy/M3hDsACA3eg3A7qJ2n04sXzlfWvQMycMYbEpXesNit8OG/twMc28qoSlYY/4rPrsn0cnnuC+iNaaOAWjv1Ai/Je0Uym2NJZPYS69p ubuntu@vpm173.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC6SxH0UUtKjxLKeFbzw4jK03+S+xt6MkNBLm8T2CtYVNZoTnh2j5pfyGiR5wMRuWfwbJC5JpDEtly/y2/pY2Z6CIw+FvxkkmMzhWX4FVtdbsrJgCwEKZ4Czdjbj91QCSa+mC8p0eZ8KfknNZwRSoYO/AIUpKTHNzE3J+4WxoGEXiXg60/31cKuglT9UY8y1RDDuOkxEY/njyquMEG8PUt71umQQGjKW5b4ZBZTKvA+4xIHqGwBwlqFQtmYgStIdixTGqydkd8txBriWBK3I+jRF/BJI74lMRcmp14mUUSGCKNp7xwZKQdRqsWub8IB/QglklVK4kIgIKdzVqHpZjXL ubuntu@vpm175.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAJPq8U3o+8bQdDiB5BNLoMQw8WEv7RCxk1BXLE9q3EfQrw4LBBtEjuueLO4oHu4LkLgI+gQaetzWt1xIMzJwLdOioLjmcXPoGuxWXn+VUNTrReuJFyUjUuCVgI/G/9Fp29DhJVWIkhh6X7mE/bhixWjQALDpuo2yKMlDENnXmpPaoLlgC6o4GJZiideBHnX0fbmtDKwVMq/sB1AWmfsB1M5hu2O2D/TRVFsCYtkm4+qPucuEja/d93l977qfiXLhNXdFTF0pAu5KEGxVwDAjwKVrAQ9GMtIDzPgTPBll5dYXdMqRgBKzs0ZAvI6wzN9XoFJq9g9evR3xuMkiemYGZ tasks: - internal.lock_machines: - 3 - vps - internal.save_config: null - internal.check_lock: null - internal.connect: null - internal.serialize_remote_roles: null - internal.check_conflict: null - internal.check_ceph_data: null - internal.vm_setup: null - kernel: *id001 - internal.base: null - internal.archive: null - internal.coredump: null - internal.sudo: null - internal.syslog: null - internal.timer: null - chef: null - clock.check: null - install: branch: dumpling - print: '**** done dumpling install' - ceph: fs: xfs - parallel: - workload - print: '**** done parallel' - install.upgrade: client.0: branch: firefly mon.a: branch: firefly mon.b: branch: firefly - print: '**** done install.upgrade' - ceph.restart: null - print: '**** 
done restart' - parallel: - workload2 - upgrade-sequence - print: '**** done parallel' - install.upgrade: client.0: null - print: '**** done install.upgrade client.0 to the version from teuthology-suite arg' - rados: clients: - client.0 ec_pool: true objects: 50 op_weights: append: 100 copy_from: 50 delete: 50 read: 100 rmattr: 25 rollback: 50 setattr: 25 snap_create: 50 snap_remove: 50 write: 0 ops: 4000 - rados: clients: - client.0 ec_pool: true erasure_code_profile: k: 3 m: 1 name: jerasure31profile plugin: jerasure ruleset-failure-domain: osd technique: reed_sol_van objects: 50 op_weights: append: 100 copy_from: 50 delete: 50 read: 100 rmattr: 25 rollback: 50 setattr: 25 snap_create: 50 snap_remove: 50 write: 0 ops: 4000 - rados: clients: - client.1 objects: 50 op_weights: delete: 50 read: 100 rollback: 50 snap_create: 50 snap_remove: 50 write: 100 ops: 4000 - workunit: clients: client.1: - rados/load-gen-mix.sh - sequential: - mon_thrash: revive_delay: 20 thrash_delay: 1 - workunit: clients: client.1: - rados/test.sh - print: '**** done rados/test.sh - 6-final-workload' - workunit: clients: client.1: - cls/test_cls_rbd.sh - workunit: clients: client.1: - rbd/import_export.sh env: RBD_CREATE_ARGS: --new-format - rgw: - client.1 - s3tests: client.1: rgw_server: client.1 - swift: client.1: rgw_server: client.1 teuthology_branch: master tube: vps upgrade-sequence: sequential: - install.upgrade: mon.a: null - print: '**** done install.upgrade mon.a to the version from teuthology-suite arg' - install.upgrade: mon.b: null - print: '**** done install.upgrade mon.b to the version from teuthology-suite arg' - ceph.restart: daemons: - mon.a - sleep: duration: 60 - ceph.restart: daemons: - mon.b - sleep: duration: 60 - ceph.restart: - mon.c - sleep: duration: 60 - ceph.restart: - osd.0 - sleep: duration: 60 - ceph.restart: - osd.1 - sleep: duration: 60 - ceph.restart: - osd.2 - sleep: duration: 60 - ceph.restart: - osd.3 - sleep: duration: 60 - ceph.restart: - mds.a - 
exec: mon.a: - ceph osd crush tunables firefly verbose: true worker_log: /var/lib/teuthworker/archive/worker_logs/worker.vps.15616 workload: sequential: - workunit: branch: dumpling clients: client.0: - rados/test.sh - cls - print: '**** done rados/test.sh & cls' - workunit: branch: dumpling clients: client.0: - rados/load-gen-big.sh - print: '**** done rados/load-gen-big.sh' - workunit: branch: dumpling clients: client.0: - rbd/test_librbd.sh - print: '**** done rbd/test_librbd.sh' - workunit: branch: dumpling clients: client.0: - rbd/test_librbd_python.sh - print: '**** done rbd/test_librbd_python.sh' workload2: sequential: - workunit: branch: firefly clients: client.0: - rados/test.sh - cls - print: '**** done #rados/test.sh and cls 2' - workunit: branch: firefly clients: client.0: - rados/load-gen-big.sh - print: '**** done rados/load-gen-big.sh 2' - workunit: branch: firefly clients: client.0: - rbd/test_librbd.sh - print: '**** done rbd/test_librbd.sh 2' - workunit: branch: firefly clients: client.0: - rbd/test_librbd_python.sh - print: '**** done rbd/test_librbd_python.sh 2'
description: upgrade:dumpling-firefly-x/parallel/{0-cluster/start.yaml 1-dumpling-install/dumpling.yaml 2-workload/{rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 3-firefly-upgrade/firefly.yaml 4-workload/{rados_api.yaml rados_loadgenbig.yaml test_rbd_api.yaml test_rbd_python.yaml} 5-upgrade-sequence/upgrade-by-daemon.yaml 6-final-workload/{ec-rados-default.yaml ec-rados-plugin=jerasure-k=3-m=1.yaml rados-snaps-few-objects.yaml rados_loadgenmix.yaml rados_mon_thrash.yaml rbd_cls.yaml rbd_import_export.yaml rgw_s3tests.yaml rgw_swift.yaml} distros/ubuntu_14.04.yaml} duration: 11580.49200797081 failure_reason: 'Command failed on vpm173 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.1.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a ''!fails_on_rgw''"' flavor: basic owner: scheduled_teuthology@teuthology success: false
Related issues
History
#1 Updated by Yuri Weinstein about 9 years ago
Here is similar failure in http://qa-proxy.ceph.com/teuthology/teuthology-2014-09-04_11:08:01-upgrade:dumpling-firefly-x-master-distro-basic-vps/468673/teuthology.log
2014-09-04T14:11:11.268 INFO:teuthology.orchestra.run.vpm137.stderr:s3tests.functional.test_s3.test_atomic_dual_write_8mb ... ERROR
#2 Updated by Yuri Weinstein about 9 years ago
I was able to reproduce this on a manual run.
#3 Updated by Ian Colle about 9 years ago
- Assignee set to Yehuda Sadeh
- Priority changed from Normal to Urgent
#4 Updated by Sage Weil about 9 years ago
- Project changed from Ceph to rgw
#5 Updated by Yuri Weinstein about 9 years ago
Could be the same as here:
http://pulpito.ceph.com/teuthology-2014-09-07_17:08:02-upgrade:dumpling-firefly-x-master-distro-basic-vps/471372/ on another suite
suite:upgrade:dumpling-firefly-x
#6 Updated by Sage Weil about 9 years ago
- Assignee changed from Yehuda Sadeh to Ilya Dryomov
#7 Updated by Sage Weil about 9 years ago
- Assignee changed from Ilya Dryomov to Yuri Weinstein
#8 Updated by Yuri Weinstein about 9 years ago
Can't reproduce it with
ceph: conf: global: osd heartbeat grace: 100 client: debug ms: 1 debug rgw: 20
But I was able to see it on a manual run without that config; logs are in /a/yuriw/468673
#9 Updated by Yuri Weinstein about 9 years ago
- Assignee changed from Yuri Weinstein to Sage Weil
I can't reproduce it with debug enabled, and the original sha1 is already gone.
#10 Updated by Yehuda Sadeh almost 9 years ago
- Status changed from New to Pending Backport
- Backport set to firefly
Should have been fixed by commit d41c3e858c6f215792c67b8c2a42312cae07ece9.
Note that when backporting, the following commits also need to be taken:
7b137246b49a9f0b4d8b8d5cebfa78cc1ebd14e7
5bb94ede19a50543a02a8019ed6c9680b3852d4e
#11 Updated by Yuri Weinstein almost 9 years ago
Also in http://pulpito.front.sepia.ceph.com/teuthology-2014-09-28_08:42:11-upgrade:dumpling-firefly-giant:parallel-giant-distro-basic-vps/ run on centos and rhel
Jobs: '516244', '516246', '516247', '516250', '516252', '516253'
2014-09-28T11:06:35.729 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_cors_origin_response ... ok 2014-09-28T11:10:28.637 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_read_1mb ... ERROR 2014-09-28T11:14:30.949 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_read_4mb ... ERROR 2014-09-28T11:18:24.645 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_read_8mb ... ERROR 2014-09-28T11:27:39.869 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_write_1mb ... ERROR 2014-09-28T11:36:10.197 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_write_4mb ... ERROR 2014-09-28T11:45:33.029 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_write_8mb ... ERROR 2014-09-28T11:54:15.349 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_dual_write_1mb ... ERROR 2014-09-28T12:03:29.881 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_dual_write_4mb ... ERROR 2014-09-28T12:12:16.076 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_dual_write_8mb ... ERROR 2014-09-28T12:12:18.253 INFO:teuthology.orchestra.run.vpm072.stderr:s3tests.functional.test_s3.test_atomic_write_bucket_gone ... ok
#12 Updated by Yuri Weinstein almost 9 years ago
suite:upgrade:dumpling-x
Jobs: ['524364', '524366', '524367'] failed with same errors.
#13 Updated by Sage Weil almost 9 years ago
- Status changed from Pending Backport to Resolved
#14 Updated by Yuri Weinstein almost 9 years ago
- Status changed from Resolved to New
I see the same issues on giant run:
2014-10-07T19:11:27.378 INFO:teuthology.orchestra.run.vpm029.stdout:successfully deleted pool unique_pool_1 2014-10-07T19:12:45.945 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_read_4mb ... ERROR 2014-10-07T19:16:58.989 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_read_8mb ... ERROR 2014-10-07T19:25:32.707 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_write_1mb ... ERROR 2014-10-07T19:33:42.987 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_write_4mb ... ERROR 2014-10-07T19:42:17.952 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_write_8mb ... ERROR 2014-10-07T19:50:51.884 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_dual_write_1mb ... ERROR 2014-10-07T20:00:05.133 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_dual_write_4mb ... ERROR 2014-10-07T20:09:32.495 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_dual_write_8mb ... ERROR 2014-10-07T20:09:34.838 INFO:teuthology.orchestra.run.vpm139.stderr:s3tests.functional.test_s3.test_atomic_write_bucket_gone ... ok
#15 Updated by Yuri Weinstein almost 9 years ago
Same issues in http://qa-proxy.ceph.com/teuthology/teuthology-2014-10-09_19:00:01-upgrade:dumpling-x-firefly-distro-basic-vps/ run
suite:upgrade:dumpling-x
4 jobs failed:
Failure: Command failed on vpm117 with status 1: "S3TEST_CONF=/home/ubuntu/cephtest/archive/s3-tests.client.1.conf BOTO_CONFIG=/home/ubuntu/cephtest/boto.cfg /home/ubuntu/cephtest/s3-tests/virtualenv/bin/nosetests -w /home/ubuntu/cephtest/s3-tests -v -a '!fails_on_rgw'" ['535409', '535411', '535412', '535413']
Errors:
2014-10-09T20:52:56.413 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_100_continue ... ERROR 2014-10-09T20:52:59.122 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_bucket_acls_changes_persistent ... ok 2014-10-09T20:53:03.507 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_stress_bucket_acls_changes ... ok 2014-10-09T20:53:03.643 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_set_cors ... ok 2014-10-09T20:53:10.952 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_cors_origin_response ... ok 2014-10-09T20:57:48.807 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_read_1mb ... ERROR 2014-10-09T21:02:36.905 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_read_4mb ... ERROR 2014-10-09T21:06:22.987 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_read_8mb ... ERROR 2014-10-09T21:15:35.389 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_write_1mb ... ERROR 2014-10-09T21:24:34.870 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_write_4mb ... ERROR 2014-10-09T21:32:58.553 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_write_8mb ... ERROR 2014-10-09T21:41:09.409 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_dual_write_1mb ... ERROR 2014-10-09T21:50:35.372 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_dual_write_4mb ... ERROR 2014-10-09T21:58:42.918 INFO:teuthology.orchestra.run.vpm117.stderr:s3tests.functional.test_s3.test_atomic_dual_write_8mb ... ERROR
#16 Updated by Sage Weil almost 9 years ago
- Status changed from New to Resolved
The above errors from Yuri are #9169 — something else.
#17 Updated by Sage Weil almost 9 years ago
- Status changed from Resolved to 12
- Assignee deleted (Sage Weil)
#18 Updated by Sage Weil almost 9 years ago
- Status changed from 12 to Resolved