Actions
Bug #8162
Status: Closed
osd: dumpling advances last_backfill prematurely
% Done:
0%
Source:
Q/A
Tags:
Backport:
Regression:
Severity:
3 - minor
Reviewed:
Affected Versions:
ceph-qa-suite:
Pull request ID:
Crash signature (v1):
Crash signature (v2):
Description
vpm02622231-362 from 1609223 to 2378512 tid 3 ranges are [0~58,453151~419471,1609223~769289] 2014-04-19T02:11:24.053 INFO:teuthology.task.rados.rados.0.out:[10.214.138.78]: 1464: oids not in use 499 2014-04-19T02:11:24.054 INFO:teuthology.task.rados.rados.0.out:[10.214.138.78]: Reading 234 2014-04-19T02:11:24.054 INFO:teuthology.task.rados.rados.0.out:[10.214.138.78]: 1465: oids not in use 498 2014-04-19T02:11:24.054 INFO:teuthology.task.rados.rados.0.out:[10.214.138.78]: RollingBack 374 to 122 2014-04-19T02:11:24.057 INFO:teuthology.task.rados.rados.0.err:[10.214.138.78]: Error: oid 234 read returned error code -2 2014-04-19T02:11:25.350 INFO:teuthology.task.rados.rados.0.out:[10.214.138.78]: finishing write
archive_path: /var/lib/teuthworker/archive/teuthology-2014-04-18_20:35:03-upgrade:dumpling-x:stress-split-firefly-distro-basic-vps/201934 description: upgrade/dumpling-x/stress-split/{0-cluster/start.yaml 1-dumpling-install/dumpling.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/rbd-import-export.yaml 6-next-mon/monb.yaml 7-workload/rados_api_tests.yaml 8-next-mon/monc.yaml 9-workload/{rados_api_tests.yaml rbd-python.yaml rgw-s3tests.yaml snaps-many-objects.yaml} distros/debian_7.0.yaml} email: null job_id: '201934' kernel: &id001 kdb: true sha1: distro last_in_suite: false machine_type: vps name: teuthology-2014-04-18_20:35:03-upgrade:dumpling-x:stress-split-firefly-distro-basic-vps nuke-on-error: true os_type: debian os_version: '7.0' overrides: admin_socket: branch: firefly ceph: conf: mon: debug mon: 20 debug ms: 1 debug paxos: 20 mon warn on legacy crush tunables: false osd: debug filestore: 20 debug journal: 20 debug ms: 1 debug osd: 20 log-whitelist: - slow request - wrongly marked me down - objects unfound and apparently lost - log bound mismatch sha1: 7251983d8ea778e1dd638fa923c16db1ce9c21a8 ceph-deploy: branch: dev: firefly conf: client: log file: /var/log/ceph/ceph-$name.$pid.log mon: debug mon: 1 debug ms: 20 debug paxos: 20 osd default pool size: 2 install: ceph: sha1: 7251983d8ea778e1dd638fa923c16db1ce9c21a8 s3tests: branch: master workunit: sha1: 7251983d8ea778e1dd638fa923c16db1ce9c21a8 owner: scheduled_teuthology@teuthology roles: - - mon.a - mon.b - mds.a - osd.0 - osd.1 - osd.2 - - osd.3 - osd.4 - osd.5 - mon.c - - client.0 targets: ubuntu@vpm026.front.sepia.ceph.com: ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQDUscT3sfngJGk9Hsh48GRaoH9YOZGHqT9qND56zJGH4DEEGbmGDM5KITkPBY3SqA95BjA4hhBg0oG7BxeGtLbdbngqXhJ/rNQewDe+smi4r6mWbqViH0ZSv3MuaPnm1tiZ1crLeVj8E684dok1otO8Sg6e8CqoxnazsyWo6YeTqJzahkRoXL4tRQsnlP4mxg9ymrO/9fEgOttYI8vkPaTPSlqwhdhpvW22H1mzgxQGe23RD8N6qMAaidqpDm5LHL9fEoNw8w5doSaD0rrOG+Get+d0wdMb8FsARJPaRWbwzF21k2sj1vOBhYX7vEHSNZwGR/ovJ8dj1L7NAdyvaW/N ubuntu@vpm027.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDnMLVCPoVWVoa+EC7kIGpb510RIeAfhDzE89fpiLgjRlbHDNJF6j2cFGZ+peY2sYFZNUZpgdWQF83yuiYCf89d5y3oKVJ+BU3EqY079ItfEqvi5Q2XcyDpbtNeeEdbl5KNJMSg9CJwQQRGzJ/oX6OGpKFL3m7wtzUzA+t2lZc1Gjonkq4zKVlLUrOHF7NxakPfJga0Oe1gjlcogl9SRiYwkDjygQ9srfxhR0y1YbCur2E05wRN7Vt6TRJiD1rLNLMEMF8AbFqYikQcAbgK3yanS+OLxrHT7M0gkmg0wxnCAeR22RUoJEEfSpOo5BanZjs5SXh2ADxEDYuQRh1Qew0f ubuntu@vpm028.front.sepia.ceph.com: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEQu13NiKDl4a59ZhjI5pL5pLlmwDDZONanBeb8BUDRfw2Hal7BA0aUZ7HgXCDYkYpSTbHXIbrCslhUEhCB8SZMJke0zxGOBpO8mfv1NqrfoHC/FXtL64cMN5DxKPf9zxjXxC1QDXv3T2TgF9XWCju+ho97LpRfA5i40RtfOz7jUygbelFTmckDtXzugsfqJOG3DNF9+HY5iV8oOABF9QMcVqzivoFlUZei1N0PJIphwlcOdnmEyrqX6mKlzNZRkwNIw16JS688a4ggjpppaj5mUOV9iAeZyxgozkVIieJEFl3uQqLRmWgYK9z1MEyoOjMtHtwyLn9C3J3YXTVmauR tasks: - internal.lock_machines: - 3 - vps - internal.save_config: null - internal.check_lock: null - internal.connect: null - internal.check_conflict: null - internal.check_ceph_data: null - internal.vm_setup: null - kernel: *id001 - internal.base: null - internal.archive: null - internal.coredump: null - internal.sudo: null - internal.syslog: null - internal.timer: null - chef: null - clock.check: null - install: branch: dumpling - ceph: fs: xfs - install.upgrade: osd.0: null - ceph.restart: daemons: - osd.0 - osd.1 - osd.2 - thrashosds: chance_pgnum_grow: 1 chance_pgpnum_fix: 1 thrash_primary_affinity: false timeout: 1200 - ceph.restart: daemons: - mon.a wait-for-healthy: false wait-for-osds-up: true - workunit: branch: dumpling clients: client.0: - rbd/import_export.sh env: 
RBD_CREATE_ARGS: --new-format - ceph.restart: daemons: - mon.b wait-for-healthy: false wait-for-osds-up: true - workunit: branch: dumpling clients: client.0: - rados/test-upgrade-firefly.sh - install.upgrade: mon.c: null - ceph.restart: daemons: - mon.c wait-for-healthy: false wait-for-osds-up: true - ceph.wait_for_mon_quorum: - a - b - c - workunit: branch: dumpling clients: client.0: - rados/test-upgrade-firefly.sh - workunit: branch: dumpling clients: client.0: - rbd/test_librbd_python.sh - rgw: client.0: idle_timeout: 300 - swift: client.0: rgw_server: client.0 - rados: clients: - client.0 objects: 500 op_weights: delete: 50 read: 100 rollback: 50 snap_create: 50 snap_remove: 50 write: 100 ops: 4000 teuthology_branch: firefly verbose: true worker_log: /var/lib/teuthworker/archive/worker_logs/worker.vps.30561
description: upgrade/dumpling-x/stress-split/{0-cluster/start.yaml 1-dumpling-install/dumpling.yaml 2-partial-upgrade/firsthalf.yaml 3-thrash/default.yaml 4-mon/mona.yaml 5-workload/rbd-import-export.yaml 6-next-mon/monb.yaml 7-workload/rados_api_tests.yaml 8-next-mon/monc.yaml 9-workload/{rados_api_tests.yaml rbd-python.yaml rgw-s3tests.yaml snaps-many-objects.yaml} distros/debian_7.0.yaml} duration: 7816.86822104454 failure_reason: '"2014-04-19 09:38:12.347438 osd.4 10.214.138.70:6812/21951 32 : [ERR] 259.1f missing primary copy of 79a6511f/vpm02622231-234/head//259, unfound" in cluster log' flavor: basic owner: scheduled_teuthology@teuthology success: false
Actions