Bug #18359
radosgw segmentation fault when using swiftclient to upload files
Description
ceph version 10.2.1 (3a66dd4f30852819c1bdaa8ec23c795d4ad77269)
I use Ceph radosgw as the OpenStack object storage service, so I set up the gateway to accept Keystone as the user authority and upload files via the Swift protocol.
But when I upload objects (using swiftclient), the radosgw thread raises a segmentation fault.
radosgw config file:
[client.radosgw.gateway]
host = AIBJ-ITC-RADOSGW-1
rgw_frontends = civetweb port=8080
rgw keystone url = http://10.1.237.23:5000
rgw keystone admin user = admin
rgw keystone admin password = aa68fbf2-ec23-4716-8d5c-4f3165606a6a
rgw keystone admin project = admin
rgw keystone admin domain = default
rgw keystone api version = 3
rgw keystone accepted roles = SwiftOperator,admin,_member_, project_admin, member2
rgw keystone token cache size = 500
rgw keystone revocation interval = 500
rgw s3 auth use keystone = true
rgw s3 auth use rados = false
rgw keystone verify ssl = false
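For reference, the kind of upload that triggers the crash can be reproduced with python-swiftclient roughly as follows. This is only a sketch: the username, password, project, and file name are placeholders, assumed to match the Keystone settings above.

# Sketch only: upload via python-swiftclient against Keystone v3.
# Credential values below are placeholders, not taken from this report.
swift --auth-version 3 \
      --os-auth-url http://10.1.237.23:5000/v3 \
      --os-username demo \
      --os-password secret \
      --os-project-name demo \
      --os-project-domain-name default \
      --os-user-domain-name default \
      upload ZOOICON ./some-file

The crash occurs during the implicit container (bucket) creation the upload performs, so creating the container alone (swift post ZOOICON) would presumably hit the same code path.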
period and realm:
[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin period get
{
    "id": "c6d8451a-60b6-4a8b-a454-624d43ba50ea",
    "epoch": 2,
    "predecessor_uuid": "25e2242d-5101-4ec6-b008-f8c24af8a6ab",
    "sync_status": ["", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
                    "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
                    "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "",
                    "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", ""],
    "period_map": {
        "id": "c6d8451a-60b6-4a8b-a454-624d43ba50ea",
        "zonegroups": [
            {
                "id": "6a7141c2-7225-47d9-9d61-989bac070030",
                "name": "default",
                "api_name": "",
                "is_master": "true",
                "endpoints": [],
                "hostnames": [],
                "hostnames_s3website": [],
                "master_zone": "860f18d7-bf55-4339-8ed0-5733fadf24b7",
                "zones": [
                    {
                        "id": "860f18d7-bf55-4339-8ed0-5733fadf24b7",
                        "name": "default",
                        "endpoints": [],
                        "log_meta": "true",
                        "log_data": "false",
                        "bucket_index_max_shards": 0,
                        "read_only": "false"
                    }
                ],
                "placement_targets": [
                    {
                        "name": "default-placement",
                        "tags": []
                    }
                ],
                "default_placement": "default-placement",
                "realm_id": "818e0400-cafb-404e-b7eb-e80eb6d1acd1"
            }
        ],
        "short_zone_ids": [
            { "key": "0eff5159-6017-4796-a027-dcdb29f64d76", "val": 2514385771 },
            { "key": "58d66ec8-e120-42cb-9c9b-e6264d4df336", "val": 2029589163 },
            { "key": "860f18d7-bf55-4339-8ed0-5733fadf24b7", "val": 542537227 }
        ]
    },
    "master_zonegroup": "6a7141c2-7225-47d9-9d61-989bac070030",
    "master_zone": "860f18d7-bf55-4339-8ed0-5733fadf24b7",
    "period_config": {
        "bucket_quota": { "enabled": false, "max_size_kb": -1, "max_objects": -1 },
        "user_quota": { "enabled": false, "max_size_kb": -1, "max_objects": -1 }
    },
    "realm_id": "818e0400-cafb-404e-b7eb-e80eb6d1acd1",
    "realm_name": "gold",
    "realm_epoch": 4
}

[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin realm list
{
    "default_info": "818e0400-cafb-404e-b7eb-e80eb6d1acd1",
    "realms": ["gold"]
}
Here is the radosgw log:
-38> 2016-12-28 14:40:57.571598 7f403c7e8700 1 ====== starting new request req=0x7f403c7e2690 =====
-37> 2016-12-28 14:40:57.571639 7f403c7e8700 2 req 1:0.000042::PUT /swift/v1/ZOOICON::initializing for trans_id = tx000000000000000000001-0058635e79-1d69f0-default
-36> 2016-12-28 14:40:57.571680 7f403c7e8700 2 req 1:0.000084:swift:PUT /swift/v1/ZOOICON::getting op 1
-35> 2016-12-28 14:40:57.571688 7f403c7e8700 2 req 1:0.000092:swift:PUT /swift/v1/ZOOICON:create_bucket:authorizing
-34> 2016-12-28 14:40:57.598159 7f403c7e8700 0 validated token: CIT-KARA1:ITBJ-CIT-KARA1-OBS01 expires: 1482936424
-33> 2016-12-28 14:40:57.598378 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.10:6806/7257 -- osd_op(client.1927664.0:1678 16.39b94a73 3e43b7b8af4e4c179182513520a264a2$3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d802eab0 con 0x7f4148035370
-32> 2016-12-28 14:40:57.599662 7f41592f5700 1 -- 10.19.5.207:0/2571996980 <== osd.29 10.19.5.10:6806/7257 22 ==== osd_op_reply(1678 3e43b7b8af4e4c179182513520a264a2$3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] v0'0 uv0 ack = -2 ((2) No such file or directory)) v7 ==== 227+0+0 (969975904 0 0) 0x7f4181b19050 con 0x7f4148035370
-31> 2016-12-28 14:40:57.599917 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.7:6809/8953 -- osd_op(client.1927664.0:1679 16.b4f8e36f 3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d802eab0 con 0x7f4148017480
-30> 2016-12-28 14:40:57.601131 7f40e43df700 1 -- 10.19.5.207:0/2571996980 <== osd.12 10.19.5.7:6809/8953 19 ==== osd_op_reply(1679 3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] v0'0 uv1 ondisk = 0) v7 ==== 194+0+91 (2497331450 0 244455843) 0x7f414c003890 con 0x7f4148017480
-29> 2016-12-28 14:40:57.601217 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.7:6809/8953 -- osd_op(client.1927664.0:1680 16.b4f8e36f 3e43b7b8af4e4c179182513520a264a2 [read 0~524288] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d802f8c0 con 0x7f4148017480
-28> 2016-12-28 14:40:57.601896 7f40e43df700 1 -- 10.19.5.207:0/2571996980 <== osd.12 10.19.5.7:6809/8953 20 ==== osd_op_reply(1680 3e43b7b8af4e4c179182513520a264a2 [read 0~213] v0'0 uv1 ondisk = 0) v7 ==== 152+0+213 (2884334747 0 1239440978) 0x7f414c0039d0 con 0x7f4148017480
-27> 2016-12-28 14:40:57.601976 7f403c7e8700 2 req 1:0.030380:swift:PUT /swift/v1/ZOOICON:create_bucket:normalizing buckets and tenants
-26> 2016-12-28 14:40:57.601994 7f403c7e8700 2 req 1:0.030398:swift:PUT /swift/v1/ZOOICON:create_bucket:init permissions
-25> 2016-12-28 14:40:57.601996 7f403c7e8700 2 req 1:0.030400:swift:PUT /swift/v1/ZOOICON:create_bucket:recalculating target
-24> 2016-12-28 14:40:57.601997 7f403c7e8700 2 req 1:0.030401:swift:PUT /swift/v1/ZOOICON:create_bucket:reading permissions
-23> 2016-12-28 14:40:57.601999 7f403c7e8700 2 req 1:0.030402:swift:PUT /swift/v1/ZOOICON:create_bucket:init op
-22> 2016-12-28 14:40:57.602005 7f403c7e8700 2 req 1:0.030409:swift:PUT /swift/v1/ZOOICON:create_bucket:verifying op mask
-21> 2016-12-28 14:40:57.602009 7f403c7e8700 2 req 1:0.030413:swift:PUT /swift/v1/ZOOICON:create_bucket:verifying op permissions
-20> 2016-12-28 14:40:57.602052 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.10:6805/6528 -- osd_op(client.1927664.0:1681 16.7751963d 3e43b7b8af4e4c179182513520a264a2.buckets [call user.list_buckets] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d802de20 con 0x7f4148019dc0
-19> 2016-12-28 14:40:57.605104 7f40787f8700 1 -- 10.19.5.207:0/2571996980 <== osd.27 10.19.5.10:6805/6528 32 ==== osd_op_reply(1681 3e43b7b8af4e4c179182513520a264a2.buckets [call] v0'0 uv9 ondisk = 0) v7 ==== 160+0+474 (297483876 0 2475580009) 0x7f415000aa80 con 0x7f4148019dc0
-18> 2016-12-28 14:40:57.605214 7f403c7e8700 2 req 1:0.033618:swift:PUT /swift/v1/ZOOICON:create_bucket:verifying op params
-17> 2016-12-28 14:40:57.605227 7f403c7e8700 2 req 1:0.033631:swift:PUT /swift/v1/ZOOICON:create_bucket:pre-executing
-16> 2016-12-28 14:40:57.605232 7f403c7e8700 2 req 1:0.033636:swift:PUT /swift/v1/ZOOICON:create_bucket:executing
-15> 2016-12-28 14:40:57.605285 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.9:6802/11370 -- osd_op(client.1927664.0:1682 13.5b6a0465 ZOOICON [call version.read,getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8011400 con 0x7f4148021770
-14> 2016-12-28 14:40:57.606432 7f40e7bfb700 1 -- 10.19.5.207:0/2571996980 <== osd.21 10.19.5.9:6802/11370 36 ==== osd_op_reply(1682 ZOOICON [call,getxattrs,stat] v0'0 uv2 ondisk = 0) v7 ==== 211+0+139 (3692678046 0 699950790) 0x7f412000d270 con 0x7f4148021770
-13> 2016-12-28 14:40:57.606532 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.9:6802/11370 -- osd_op(client.1927664.0:1683 13.5b6a0465 ZOOICON [call version.check_conds,call version.read,read 0~524288] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8013e20 con 0x7f4148021770
-12> 2016-12-28 14:40:57.607234 7f40e7bfb700 1 -- 10.19.5.207:0/2571996980 <== osd.21 10.19.5.9:6802/11370 37 ==== osd_op_reply(1683 ZOOICON [call,call,read 0~286] v0'0 uv2 ondisk = 0) v7 ==== 211+0+334 (823587195 0 1033997228) 0x7f412000d270 con 0x7f4148021770
-11> 2016-12-28 14:40:57.607334 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.5:6801/3994 -- osd_op(client.1927664.0:1684 13.ddf9455 .bucket.meta.ZOOICON:860f18d7-bf55-4339-8ed0-5733fadf24b7.2122316.1 [call version.read,getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8013490 con 0x7f4148042230
-10> 2016-12-28 14:40:57.609647 7f4158aed700 1 -- 10.19.5.207:0/2571996980 <== osd.1 10.19.5.5:6801/3994 48 ==== osd_op_reply(1684 .bucket.meta.ZOOICON:860f18d7-bf55-4339-8ed0-5733fadf24b7.2122316.1 [call,getxattrs,stat] v0'0 uv1 ondisk = 0) v7 ==== 271+0+402 (4110439449 0 1197340682) 0x7f415000b080 con 0x7f4148042230
-9> 2016-12-28 14:40:57.609721 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.5:6801/3994 -- osd_op(client.1927664.0:1685 13.ddf9455 .bucket.meta.ZOOICON:860f18d7-bf55-4339-8ed0-5733fadf24b7.2122316.1 [call version.check_conds,call version.read,read 0~524288] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8015fc0 con 0x7f4148042230
-8> 2016-12-28 14:40:57.610372 7f4158aed700 1 -- 10.19.5.207:0/2571996980 <== osd.1 10.19.5.5:6801/3994 49 ==== osd_op_reply(1685 .bucket.meta.ZOOICON:860f18d7-bf55-4339-8ed0-5733fadf24b7.2122316.1 [call,call,read 0~327] v0'0 uv1 ondisk = 0) v7 ==== 271+0+375 (1737290596 0 947934107) 0x7f415000b260 con 0x7f4148042230
-7> 2016-12-28 14:40:57.610440 7f403c7e8700 0 WARNING: couldn't find acl header for object, generating default
-6> 2016-12-28 14:40:57.610475 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.7:6809/8953 -- osd_op(client.1927664.0:1686 16.b4f8e36f 3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d80147a0 con 0x7f4148017480
-5> 2016-12-28 14:40:57.611118 7f40e43df700 1 -- 10.19.5.207:0/2571996980 <== osd.12 10.19.5.7:6809/8953 21 ==== osd_op_reply(1686 3e43b7b8af4e4c179182513520a264a2 [getxattrs,stat] v0'0 uv1 ondisk = 0) v7 ==== 194+0+91 (2497331450 0 244455843) 0x7f414c003890 con 0x7f4148017480
-4> 2016-12-28 14:40:57.611216 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.10:6808/6900 -- osd_op(client.1927664.0:1687 13.562fa4b3 .pools.avail [getxattrs,stat] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8014760 con 0x7f41480492f0
-3> 2016-12-28 14:40:57.611964 7f40e69e9700 1 -- 10.19.5.207:0/2571996980 <== osd.28 10.19.5.10:6808/6900 25 ==== osd_op_reply(1687 .pools.avail [getxattrs,stat] v0'0 uv4 ondisk = 0) v7 ==== 174+0+20 (2093623951 0 3602961541) 0x7f41380008c0 con 0x7f41480492f0
-2> 2016-12-28 14:40:57.612044 7f403c7e8700 1 -- 10.19.5.207:0/2571996980 --> 10.19.5.10:6808/6900 -- osd_op(client.1927664.0:1688 13.562fa4b3 .pools.avail [read 0~524288] snapc 0=[] ack+read+known_if_redirected e1454) v7 -- ?+0 0x7f40d8015060 con 0x7f41480492f0
-1> 2016-12-28 14:40:57.612484 7f40e69e9700 1 -- 10.19.5.207:0/2571996980 <== osd.28 10.19.5.10:6808/6900 26 ==== osd_op_reply(1688 .pools.avail [read 0~36] v0'0 uv4 ondisk = 0) v7 ==== 132+0+36 (4256052527 0 1458669308) 0x7f41380008c0 con 0x7f41480492f0
0> 2016-12-28 14:40:57.613814 7f403c7e8700 -1 *** Caught signal (Segmentation fault) **
in thread 7f403c7e8700 thread_name:radosgw

ceph version 10.2.1 (3a66dd4f30852819c1bdaa8ec23c795d4ad77269)
1: (()+0x54774a) [0x7f417606f74a]
2: (()+0xf130) [0x7f41754ab130]
3: (std::string::assign(std::string const&)+0x19) [0x7f417504fb39]
4: (RGWRados::select_legacy_bucket_placement(std::string const&, std::string const&, rgw_bucket&, RGWZonePlacementInfo*)+0x300) [0x7f4175f1a3d0]
5: (RGWCreateBucket::execute()+0x5cc) [0x7f4175ee14dc]
6: (process_request(RGWRados*, RGWREST*, RGWRequest*, RGWStreamIO*, OpsLogSocket*)+0xd07) [0x7f4175ef4f67]
7: (()+0x19373) [0x7f417f991373]
8: (()+0x232ef) [0x7f417f99b2ef]
9: (()+0x252d8) [0x7f417f99d2d8]
10: (()+0x7df3) [0x7f41754a3df3]
11: (clone()+0x6d) [0x7f4174ab03dd]
NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
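Frames 3-5 show RGWRados::select_legacy_bucket_placement() crashing in std::string::assign() while RGWCreateBucket::execute() is creating the bucket. To resolve the raw frames to file and line numbers, one option is the sketch below; it assumes a CentOS/RHEL-style host with debuginfo repositories available and a core dump captured at the crash (the core path is a placeholder).

# Sketch only: symbolize the radosgw backtrace (paths/packages assumed).
debuginfo-install -y ceph-radosgw     # install matching debug symbols
gdb /usr/bin/radosgw /path/to/core    # load the binary plus the core dump
(gdb) bt full                         # backtrace with file:line and locals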
And the period list:
[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin period list
{
    "periods": [
        "03ff5dce-d579-4d67-8265-4889aec6a63e",
        "25e2242d-5101-4ec6-b008-f8c24af8a6ab",
        "57b08538-93e9-4625-86df-ff0c7840cfcb",
        "7bfdac5e-5d76-47cc-a345-86203274d902",
        "818e0400-cafb-404e-b7eb-e80eb6d1acd1:staging",
        "95124884-469a-4349-af9e-e4ffdc3108e2",
        "c6d8451a-60b6-4a8b-a454-624d43ba50ea",
        "e25e0ac9-d07e-48a6-9369-da5c11805cbc",
        "f0104e3b-b419-4a04-83f1-a955cb1fd0da",
        "f912388f-0b8b-46a6-b170-6c338e3d8e06"
    ]
}
And how can I roll back the period to "e25e0ac9-d07e-48a6-9369-da5c11805cbc"?
History
#1 Updated by chen bob about 7 years ago
[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin realm list-periods
{
    "current_period": "c6d8451a-60b6-4a8b-a454-624d43ba50ea",
    "periods": [
        "c6d8451a-60b6-4a8b-a454-624d43ba50ea",
        "25e2242d-5101-4ec6-b008-f8c24af8a6ab",
        "7bfdac5e-5d76-47cc-a345-86203274d902",
        "e25e0ac9-d07e-48a6-9369-da5c11805cbc"
    ]
}

[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin period get --period=95124884-469a-4349-af9e-e4ffdc3108e2
{
    "id": "95124884-469a-4349-af9e-e4ffdc3108e2",
    "epoch": 1,
    "predecessor_uuid": "",
    "sync_status": [],
    "period_map": {
        "id": "95124884-469a-4349-af9e-e4ffdc3108e2",
        "zonegroups": [],
        "short_zone_ids": []
    },
    "master_zonegroup": "",
    "master_zone": "",
    "period_config": {
        "bucket_quota": { "enabled": false, "max_size_kb": -1, "max_objects": -1 },
        "user_quota": { "enabled": false, "max_size_kb": -1, "max_objects": -1 }
    },
    "realm_id": "10db1a01-9595-44a7-a9af-07a6722fe2d0",
    "realm_name": "new-itc",
    "realm_epoch": 1
}
#2 Updated by chen bob about 7 years ago
And when I upload a large file, radosgw crashes.
Here is my log:
2017-01-05 13:47:24.069468 7fa27afd5700 1 civetweb: 0x7fa32c00bd80: 10.19.60.22 - - [05/Jan/2017:13:47:23 +0800] "GET /swift/v1 HTTP/1.1" 200 0 - -
2017-01-05 13:47:26.655058 7fa27a7d4700 1 ====== starting new request req=0x7fa27a7ce690 =====
2017-01-05 13:47:26.659423 7fa27a7d4700 1 ====== req done req=0x7fa27a7ce690 op status=0 http_status=200 ======
2017-01-05 13:47:26.659474 7fa27a7d4700 1 civetweb: 0x7fa33400f190: 10.19.60.22 - - [05/Jan/2017:13:47:26 +0800] "GET /swift/v1/container1 HTTP/1.1" 200 0 - -
2017-01-05 13:48:16.477105 7fa279fd3700 1 ====== starting new request req=0x7fa279fcd690 =====
2017-01-05 13:48:16.557271 7fa279fd3700 1 ====== req done req=0x7fa279fcd690 op status=1900 http_status=201 ======
2017-01-05 13:48:16.557328 7fa279fd3700 1 civetweb: 0x7fa2ec00e4c0: 10.19.60.22 - - [05/Jan/2017:13:48:16 +0800] "PUT /swift/v1/_segments_container1 HTTP/1.1" 201 0 http://10.19.60.24/buckets/container1 Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
2017-01-05 13:48:17.130258 7fa2797d2700 1 ====== starting new request req=0x7fa2797cc690 =====
2017-01-05 13:48:17.133987 7fa2797d2700 1 ====== req done req=0x7fa2797cc690 op status=0 http_status=404 ======
2017-01-05 13:48:17.134039 7fa2797d2700 1 civetweb: 0x7fa35c00ec30: 10.19.60.22 - - [05/Jan/2017:13:48:17 +0800] "HEAD /swift/v1/_segments_container1/api.log-3 HTTP/1.1" 404 0 http://10.19.60.24/buckets/container1 Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
2017-01-05 13:48:33.175216 7fa278fd1700 1 ====== starting new request req=0x7fa278fcb690 =====
2017-01-05 13:48:33.252290 7fa278fd1700 1 ====== req done req=0x7fa278fcb690 op status=1900 http_status=201 ======
2017-01-05 13:48:33.252368 7fa278fd1700 1 civetweb: 0x7fa2b0002ff0: 10.19.60.22 - - [05/Jan/2017:13:48:33 +0800] "PUT /swift/v1/_segments_container1/api.log-1 HTTP/1.1" 201 0 http://10.19.60.24/buckets/container1 Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
2017-01-05 13:48:34.746514 7fa2787d0700 1 ====== starting new request req=0x7fa2787ca690 =====
2017-01-05 13:48:34.749163 7fa2787d0700 1 ====== req done req=0x7fa2787ca690 op status=0 http_status=404 ======
2017-01-05 13:48:34.749204 7fa2787d0700 1 civetweb: 0x7fa360002ff0: 10.19.60.22 - - [05/Jan/2017:13:48:34 +0800] "HEAD /swift/v1/_segments_container1/api.log-6 HTTP/1.1" 404 0 http://10.19.60.24/buckets/container1 Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
2017-01-05 13:48:47.025764 7fa277fcf700 1 ====== starting new request req=0x7fa277fc9690 =====
2017-01-05 13:48:47.094075 7fa277fcf700 1 ====== req done req=0x7fa277fc9690 op status=1900 http_status=201 ======
2017-01-05 13:48:47.094142 7fa277fcf700 1 civetweb: 0x7fa29c004810: 10.19.60.22 - - [05/Jan/2017:13:48:47 +0800] "PUT /swift/v1/_segments_container1/api.log-5 HTTP/1.1" 201 0 http://10.19.60.24/buckets/container1 Mozilla/5.0 (Windows NT 6.1; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0
2017-01-05 13:48:47.671384 7fa2777ce700 1 ====== starting new request req=0x7fa2777c8690 =====
2017-01-05 13:48:47.677217 7fa2777ce700 1 ====== req done req=0x7fa2777c8690 op status=0 http_status=200 ======
2017-01-05 13:48:47.677275 7fa2777ce700 1 civetweb: 0x7fa328002ff0: 10.19.60.22 - - [05/Jan/2017:13:48:47 +0800] "GET /swift/v1/container1 HTTP/1.1" 200 0 - -
2017-01-05 13:53:30.527722 7fa27dfdb700 0 ERROR: signer 0 status = SigningCertNotFound
2017-01-05 13:53:30.527741 7fa27dfdb700 0 ERROR: problem decoding
2017-01-05 13:53:30.527743 7fa27dfdb700 0 ceph_decode_cms returned -22
2017-01-05 13:53:30.527750 7fa27dfdb700 0 ERROR: keystone revocation processing returned error r=-22
2017-01-05 14:01:50.567101 7fa27dfdb700 0 ERROR: signer 0 status = SigningCertNotFound
2017-01-05 14:01:50.567115 7fa27dfdb700 0 ERROR: problem decoding
2017-01-05 14:01:50.567117 7fa27dfdb700 0 ceph_decode_cms returned -22
2017-01-05 14:01:50.567134 7fa27dfdb700 0 ERROR: keystone revocation processing returned error r=-22
2017-01-05 14:01:59.660844 7fa276fcd700 1 ====== starting new request req=0x7fa276fc7690 =====

And from /var/log/ceph/ceph-client.radosgw.gateway.log, the same backtrace again:

ceph version 10.2.1 (3a66dd4f30852819c1bdaa8ec23c795d4ad77269)
1: (()+0x54774a) [0x7f38c1a9274a]
2: (()+0xf130) [0x7f38c0ece130]
3: (std::string::assign(std::string const&)+0x19) [0x7f38c0a72b39]
4: (RGWRados::select_legacy_bucket_placement(std::string const&, std::string const&, rgw_bucket&, RGWZonePlacementInfo*)+0x300) [0x7f38c193d3d0]
5: (RGWCreateBucket::execute()+0x5cc) [0x7f38c19044dc]
6: (process_request(RGWRados*, RGWREST*, RGWRequest*, RGWStreamIO*, OpsLogSocket*)+0xd07) [0x7f38c1917f67]
7: (()+0x19373) [0x7f38cb3b4373]
8: (()+0x232ef) [0x7f38cb3be2ef]
9: (()+0x252d8) [0x7f38cb3c02d8]
10: (()+0x7df3) [0x7f38c0ec6df3]
11: (clone()+0x6d) [0x7f38c04d33dd]
NOTE: a copy of the executable, or `objdump -rdS <executable>` is needed to interpret this.
#3 Updated by Yehuda Sadeh about 7 years ago
What does the zone config look like? Is default-placement defined there?
#4 Updated by chen bob about 7 years ago
Yehuda Sadeh wrote:
What does the zone config look like? Is default-placement defined there?
How can I get the zone config? With what command? Thanks.
#5 Updated by Sirisha Guduru about 7 years ago
I think you can get the zone config this way:
radosgw-admin zone get --rgw-zone=<zone name>
#6 Updated by chen bob about 7 years ago
Sirisha Guduru wrote:
I think you can get the zone config this way:
radosgw-admin zone get --rgw-zone=<zone name>
[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin zone get --rgw-zone=default
{
    "id": "860f18d7-bf55-4339-8ed0-5733fadf24b7",
    "name": "default",
    "domain_root": "default.rgw.data.root",
    "control_pool": "default.rgw.control",
    "gc_pool": "default.rgw.gc",
    "log_pool": "default.rgw.log",
    "intent_log_pool": "default.rgw.intent-log",
    "usage_log_pool": "default.rgw.usage",
    "user_keys_pool": "default.rgw.users.keys",
    "user_email_pool": "default.rgw.users.email",
    "user_swift_pool": "default.rgw.users.swift",
    "user_uid_pool": "default.rgw.users.uid",
    "system_key": { "access_key": "", "secret_key": "" },
    "placement_pools": [],
    "metadata_heap": "default.rgw.meta",
    "realm_id": "818e0400-cafb-404e-b7eb-e80eb6d1acd1"
}

[root@AIBJ-ITC-RADOSGW-1 ~]# radosgw-admin zone get --rgw-zone=new-itc
{
    "id": "430ff3f3-706f-4c46-b433-ba7b5c81dc02",
    "name": "new-itc",
    "domain_root": "new-itc.rgw.data.root",
    "control_pool": "new-itc.rgw.control",
    "gc_pool": "new-itc.rgw.gc",
    "log_pool": "new-itc.rgw.log",
    "intent_log_pool": "new-itc.rgw.intent-log",
    "usage_log_pool": "new-itc.rgw.usage",
    "user_keys_pool": "new-itc.rgw.users.keys",
    "user_email_pool": "new-itc.rgw.users.email",
    "user_swift_pool": "new-itc.rgw.users.swift",
    "user_uid_pool": "new-itc.rgw.users.uid",
    "system_key": { "access_key": "", "secret_key": "" },
    "placement_pools": [
        {
            "key": "default-placement",
            "val": {
                "index_pool": "new-itc.rgw.buckets.index",
                "data_pool": "new-itc.rgw.buckets.data",
                "data_extra_pool": "new-itc.rgw.buckets.non-ec",
                "index_type": 0
            }
        }
    ],
    "metadata_heap": "new-itc.rgw.meta",
    "realm_id": ""
}
#7 Updated by chen bob about 7 years ago
I notice that the difference between the new-itc and default zones is placement_pools: the default zone's placement_pools is empty.
How can I set the default zone's placement_pools like the new-itc zone's?
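One possible way to do that is sketched below. This is not verified on this cluster: the pool names are hypothetical, modeled on the new-itc zone, and the pools must already exist (or be created first); the period must then be committed for the change to take effect.

# Sketch only: give the default zone a placement_pools entry, then commit.
radosgw-admin zone get --rgw-zone=default > zone.json
# Edit zone.json and add a "default-placement" entry under "placement_pools",
# e.g. "index_pool": "default.rgw.buckets.index",
#      "data_pool": "default.rgw.buckets.data",
#      "data_extra_pool": "default.rgw.buckets.non-ec"   (hypothetical names)
radosgw-admin zone set --rgw-zone=default --infile zone.json
radosgw-admin period update --commit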
#8 Updated by Matt Benjamin about 7 years ago
- Status changed from New to Fix Under Review
- Assignee set to Orit Wasserman