Bug #22556 » gz-sh-s3-install.txt

Amine Liu, 01/03/2018 08:07 AM

 
# Network requirements:
Shanghai cluster: configure a VIP and the domain s3sh.imu.cn --> 172.18.52.225
172.18.216.129
172.18.216.113
172.18.216.97

Guangzhou cluster: configure a VIP and the domain s3gz.imu.cn --> 172.26.99.2
172.26.217.48
172.26.217.49
172.26.217.50

The Shanghai and Guangzhou VIPs must be reachable from each other.

172.26.217.16/17/18: configure one VIP and bind it to the domain s3sxtest.imu.cn

Nodes behind all three VIPs must be able to access any of the domains.
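
# A quick way to verify the requirements above once the VIPs and gateways are
# up (a hedged sketch, not part of the original notes; the domain list is the
# one defined in this section):
for d in s3sh.imu.cn s3gz.imu.cn s3sxtest.imu.cn; do
    getent hosts $d && curl -s -o /dev/null -w "$d -> HTTP %{http_code}\n" http://$d/ || echo "$d unreachable"
done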
# master node:
ceph-deploy new sx-3f3r-ceph-s3-c1-01 sx-3f3r-ceph-s3-c1-02 sx-3f3r-ceph-s3-c1-03
ceph-deploy mon create-initial
ceph-deploy --overwrite-conf admin sx-3f3r-ceph-s3-c1-01 sx-3f3r-ceph-s3-c1-02 sx-3f3r-ceph-s3-c1-03
#ceph-deploy mgr create sx-3f3r-ceph-s3-c1-03 # mgr exists only on Luminous (L) and later
#ceph mgr module enable dashboard
#
#An address where the dashboard will listen on needs to be configured as well, set this to ``::`` to listen on all
#IPv4 and IPv6 addresses
#ceph config-key set mgr/dashboard/server_addr ::

for i in {a..n};do ceph-deploy disk zap sx-3f3r-ceph-s3-c1-01:sd$i;done
for i in {a..n};do ceph-deploy disk zap sx-3f3r-ceph-s3-c1-02:sd$i;done
for i in {a..n};do ceph-deploy disk zap sx-3f3r-ceph-s3-c1-03:sd$i;done

for i in {a..n};do ceph-deploy --overwrite-conf osd create sx-3f3r-ceph-s3-c1-01:sd$i;done
for i in {a..n};do ceph-deploy --overwrite-conf osd create sx-3f3r-ceph-s3-c1-02:sd$i;done
for i in {a..n};do ceph-deploy --overwrite-conf osd create sx-3f3r-ceph-s3-c1-03:sd$i;done
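
# After creating the OSDs it is worth confirming they all came up
# (standard status commands, not part of the original notes):
ceph -s
ceph osd tree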


# slave node:
ceph-deploy new xxy-2f201r-ceph-s3-c1-01 xxy-2f201r-ceph-s3-c1-02 xxy-2f201r-ceph-s3-c1-03
ceph-deploy mon create-initial
#for i in {b..m};do sh -c "dd if=/dev/zero of=/dev/sd$i bs=10M count=400&";done  # optional: zero the first ~4 GB of each disk before zapping
for i in {b..m};do ceph-deploy disk zap xxy-2f201r-ceph-s3-c1-01:sd$i;done
for i in {b..m};do ceph-deploy osd create xxy-2f201r-ceph-s3-c1-01:sd$i;done
for i in {b..m};do ceph-deploy disk zap xxy-2f201r-ceph-s3-c1-02:sd$i;done
for i in {b..m};do ceph-deploy osd create xxy-2f201r-ceph-s3-c1-02:sd$i;done
for i in {b..m};do ceph-deploy disk zap xxy-2f201r-ceph-s3-c1-03:sd$i;done
for i in {b..m};do ceph-deploy osd create xxy-2f201r-ceph-s3-c1-03:sd$i;done

#####
# Install the RGW
ceph-deploy install --rgw sx-3f3r-ceph-s3-c1-01 sx-3f3r-ceph-s3-c1-02 sx-3f3r-ceph-s3-c1-03
# Recommended instead: yum -y install ceph-radosgw on each node

ceph-deploy rgw create sx-3f3r-ceph-s3-c1-01 sx-3f3r-ceph-s3-c1-02 sx-3f3r-ceph-s3-c1-03
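
# Sanity check: an anonymous request to any gateway should return an empty
# ListAllMyBucketsResult XML document (a hedged check, not from the original
# notes):
curl -s http://sx-3f3r-ceph-s3-c1-01:80/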
# Create the master realm

radosgw-admin realm create --rgw-realm=imusic --default

{
"id": "41e5334a-0b66-4e39-bdad-e210c8ca8730",
"name": "imusic",
"current_period": "d220c25c-dbf7-44b1-b410-f5cc30c52eb1",
"epoch": 1
}

# Create the zonegroup
radosgw-admin zonegroup create --rgw-zonegroup=imu --endpoints=http://s3gz.imu.cn:80 --rgw-realm=imusic --master --default
{
"id": "1825aa39-e1d3-42ca-8cb6-3920335f2434",
"name": "imu",
"api_name": "imu",
"is_master": "true",
"endpoints": [
"http:\/\/s3gz.imu.cn:80"
],
"hostnames": [],
"hostnames_s3website": [],
"master_zone": "",
"zones": [],
"placement_targets": [],
"default_placement": "",
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730"
}

# Create the master zone
radosgw-admin zone create --rgw-zonegroup=imu --rgw-zone=imu-gzsx \
--master --default \
--endpoints=http://s3gz.imu.cn:80
{
"id": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"name": "imu-gzsx",
"domain_root": "imu-gzsx.rgw.data.root",
"control_pool": "imu-gzsx.rgw.control",
"gc_pool": "imu-gzsx.rgw.gc",
"log_pool": "imu-gzsx.rgw.log",
"intent_log_pool": "imu-gzsx.rgw.intent-log",
"usage_log_pool": "imu-gzsx.rgw.usage",
"user_keys_pool": "imu-gzsx.rgw.users.keys",
"user_email_pool": "imu-gzsx.rgw.users.email",
"user_swift_pool": "imu-gzsx.rgw.users.swift",
"user_uid_pool": "imu-gzsx.rgw.users.uid",
"system_key": {
"access_key": "",
"secret_key": ""
},
"placement_pools": [
{
"key": "default-placement",
"val": {
"index_pool": "imu-gzsx.rgw.buckets.index",
"data_pool": "imu-gzsx.rgw.buckets.data",
"data_extra_pool": "imu-gzsx.rgw.buckets.non-ec",
"index_type": 0
}
}
],
"metadata_heap": "",
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730"
}

# Create the synchronization (system) user
radosgw-admin user create --uid="synchronization-user" --display-name="sync-user" --system --access-key=K9237GSUFH0SZRQHBBP2 --secret=bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS
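
# The same access/secret pair is reused below ("zone modify") and again on the
# slave cluster for realm/period pull; a quick way to confirm it was stored:
radosgw-admin user info --uid=synchronization-user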


# Delete the default zone, zonegroup, and pools
radosgw-admin zonegroup remove --rgw-zonegroup=default --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zone delete --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zonegroup delete --rgw-zonegroup=default
radosgw-admin period update --commit

rados rmpool default.rgw.control default.rgw.control --yes-i-really-really-mean-it
rados rmpool default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
rados rmpool default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
rados rmpool default.rgw.log default.rgw.log --yes-i-really-really-mean-it
# Add the system user's keys to the zone
radosgw-admin zone modify --rgw-zone=imu-gzsx --access-key=K9237GSUFH0SZRQHBBP2 --secret=bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS

# Commit the period so the configuration takes effect
radosgw-admin period update --commit
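
# Confirm that the committed period now lists imu-gzsx as the master zone:
radosgw-admin period get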




# Edit the configuration (ceph.conf rgw sections)

[client.rgw.sx-3f3r-ceph-s3-c1-01]
host = sx-3f3r-ceph-s3-c1-01
rgw_frontends = "civetweb port=80"
rgw_zone = imu-gzsx            # added
rgw_dns_name = s3gz.imu.cn     # added
rgw_content_length_compat = true

[client.rgw.sx-3f3r-ceph-s3-c1-02]
host = sx-3f3r-ceph-s3-c1-02
rgw_frontends = "civetweb port=80"
rgw_zone = imu-gzsx
rgw_dns_name = s3gz.imu.cn
rgw_content_length_compat = true

[client.rgw.sx-3f3r-ceph-s3-c1-03]
host = sx-3f3r-ceph-s3-c1-03
rgw_frontends = "civetweb port=80"
rgw_zone = imu-gzsx
rgw_dns_name = s3gz.imu.cn
rgw_content_length_compat = true


ceph-deploy --overwrite-conf config push sx-3f3r-ceph-s3-c1-0{1..3}



# Restart the gateways (run on each node)

systemctl enable ceph-radosgw@rgw.`hostname -s`
systemctl restart ceph-radosgw@rgw.`hostname -s`
systemctl status ceph-radosgw@rgw.`hostname -s`
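
# The three commands above must run on every gateway node; with passwordless
# ssh from the admin node (which ceph-deploy already requires), a loop does it
# in one go (a sketch, not from the original notes):
for h in sx-3f3r-ceph-s3-c1-0{1..3}; do
    ssh $h 'systemctl enable ceph-radosgw@rgw.$(hostname -s) && systemctl restart ceph-radosgw@rgw.$(hostname -s)'
done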




# Second cluster: slave zone

# Install the RGW
ceph-deploy install --rgw xxy-2f201r-ceph-s3-c1-01 xxy-2f201r-ceph-s3-c1-02 xxy-2f201r-ceph-s3-c1-03
# Recommended instead: run yum -y install ceph-radosgw on each node

ceph-deploy --overwrite-conf rgw create xxy-2f201r-ceph-s3-c1-01 xxy-2f201r-ceph-s3-c1-02 xxy-2f201r-ceph-s3-c1-03

# Add the rgw configuration:
[client.rgw.xxy-2f201r-ceph-s3-c1-01]
host = xxy-2f201r-ceph-s3-c1-01
rgw_frontends = "civetweb port=80"

[client.rgw.xxy-2f201r-ceph-s3-c1-02]
host = xxy-2f201r-ceph-s3-c1-02
rgw_frontends = "civetweb port=80"

[client.rgw.xxy-2f201r-ceph-s3-c1-03]
host = xxy-2f201r-ceph-s3-c1-03
rgw_frontends = "civetweb port=80"

ceph-deploy --overwrite-conf config push xxy-2f201r-ceph-s3-c1-0{1..3}

systemctl restart ceph-radosgw@rgw.`hostname -s`

# Pull the realm
radosgw-admin realm pull --url=s3gz.imu.cn:80 --access-key=K9237GSUFH0SZRQHBBP2 --secret=bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS


2018-01-03 15:43:53.817285 7f928aabc9c0 1 error read_lastest_epoch .rgw.root:periods.0f9aa0e6-2a6e-4689-abee-d64f43f0ccac.latest_epoch
2018-01-03 15:43:53.852182 7f928aabc9c0 1 Set the period's master zonegroup 1825aa39-e1d3-42ca-8cb6-3920335f2434 as the default
{
"id": "41e5334a-0b66-4e39-bdad-e210c8ca8730",
"name": "imusic",
"current_period": "0f9aa0e6-2a6e-4689-abee-d64f43f0ccac",
"epoch": 2
}

# Pull the period
radosgw-admin period pull --url=s3gz.imu.cn:80 --access-key=K9237GSUFH0SZRQHBBP2 --secret=bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS

2018-01-03 15:44:55.503030 7fcb113879c0 1 found existing latest_epoch 4 >= given epoch 4, returning r=-17
{
"id": "0f9aa0e6-2a6e-4689-abee-d64f43f0ccac",
"epoch": 4,
"predecessor_uuid": "d220c25c-dbf7-44b1-b410-f5cc30c52eb1",
"sync_status": [],
"period_map": {
"id": "0f9aa0e6-2a6e-4689-abee-d64f43f0ccac",
"zonegroups": [
{
"id": "1825aa39-e1d3-42ca-8cb6-3920335f2434",
"name": "imu",
"api_name": "imu",
"is_master": "true",
"endpoints": [
"http:\/\/s3gz.imu.cn:80"
],
"hostnames": [],
"hostnames_s3website": [],
"master_zone": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"zones": [
{
"id": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"name": "imu-gzsx",
"endpoints": [
"http:\/\/s3gz.imu.cn:80"
],
"log_meta": "false",
"log_data": "false",
"bucket_index_max_shards": 0,
"read_only": "false"
}
],
"placement_targets": [
{
"name": "default-placement",
"tags": []
}
],
"default_placement": "default-placement",
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730"
}
],
"short_zone_ids": [
{
"key": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"val": 1331659605
}
]
},
"master_zonegroup": "1825aa39-e1d3-42ca-8cb6-3920335f2434",
"master_zone": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"period_config": {
"bucket_quota": {
"enabled": false,
"max_size_kb": -1,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"max_size_kb": -1,
"max_objects": -1
}
},
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730",
"realm_name": "imusic",
"realm_epoch": 2
}

radosgw-admin realm default --rgw-realm=imusic
radosgw-admin zonegroup default --rgw-zonegroup=imu
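
# Verify that the pulled realm and zonegroup are now the defaults here:
radosgw-admin realm list
radosgw-admin zonegroup list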

# Create the slave zone

radosgw-admin zone create --rgw-zonegroup=imu --rgw-zone=imu-shxxy --access-key=K9237GSUFH0SZRQHBBP2 --secret=bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS --endpoints=http://s3sh.imu.cn:80 --default

2018-01-03 15:48:15.182574 7fd289e969c0 0 failed reading obj info from .rgw.root:zone_info.f608ce2b-5584-45af-b0c5-f4896995bd22: (2) No such file or directory
2018-01-03 15:48:15.182614 7fd289e969c0 0 WARNING: could not read zone params for zone id=f608ce2b-5584-45af-b0c5-f4896995bd22 name=imu-gzsx
{
"id": "01e0adba-e978-4c2f-be85-f42864464e87",
"name": "imu-shxxy",
"domain_root": "imu-shxxy.rgw.data.root",
"control_pool": "imu-shxxy.rgw.control",
"gc_pool": "imu-shxxy.rgw.gc",
"log_pool": "imu-shxxy.rgw.log",
"intent_log_pool": "imu-shxxy.rgw.intent-log",
"usage_log_pool": "imu-shxxy.rgw.usage",
"user_keys_pool": "imu-shxxy.rgw.users.keys",
"user_email_pool": "imu-shxxy.rgw.users.email",
"user_swift_pool": "imu-shxxy.rgw.users.swift",
"user_uid_pool": "imu-shxxy.rgw.users.uid",
"system_key": {
"access_key": "K9237GSUFH0SZRQHBBP2",
"secret_key": "bKob0LirCx48EHrTAwDlB563rXa9dDziuJBHOQDS"
},
"placement_pools": [
{
"key": "default-placement",
"val": {
"index_pool": "imu-shxxy.rgw.buckets.index",
"data_pool": "imu-shxxy.rgw.buckets.data",
"data_extra_pool": "imu-shxxy.rgw.buckets.non-ec",
"index_type": 0
}
}
],
"metadata_heap": "",
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730"
}

# Update the period

radosgw-admin period update --commit --rgw-zone=imu-shxxy

2018-01-03 15:48:38.326358 7f8465b9a9c0 1 Cannot find zone id=01e0adba-e978-4c2f-be85-f42864464e87 (name=imu-shxxy), switching to local zonegroup configuration
Sending period to new master zone f608ce2b-5584-45af-b0c5-f4896995bd22
{
"id": "0f9aa0e6-2a6e-4689-abee-d64f43f0ccac",
"epoch": 5,
"predecessor_uuid": "d220c25c-dbf7-44b1-b410-f5cc30c52eb1",
"sync_status": [],
"period_map": {
"id": "0f9aa0e6-2a6e-4689-abee-d64f43f0ccac",
"zonegroups": [
{
"id": "1825aa39-e1d3-42ca-8cb6-3920335f2434",
"name": "imu",
"api_name": "imu",
"is_master": "true",
"endpoints": [
"http:\/\/s3gz.imu.cn:80"
],
"hostnames": [],
"hostnames_s3website": [],
"master_zone": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"zones": [
{
"id": "01e0adba-e978-4c2f-be85-f42864464e87",
"name": "imu-shxxy",
"endpoints": [
"http:\/\/s3sh.imu.cn:80"
],
"log_meta": "false",
"log_data": "true",
"bucket_index_max_shards": 0,
"read_only": "false"
},
{
"id": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"name": "imu-gzsx",
"endpoints": [
"http:\/\/s3gz.imu.cn:80"
],
"log_meta": "false",
"log_data": "true",
"bucket_index_max_shards": 0,
"read_only": "false"
}
],
"placement_targets": [
{
"name": "default-placement",
"tags": []
}
],
"default_placement": "default-placement",
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730"
}
],
"short_zone_ids": [
{
"key": "01e0adba-e978-4c2f-be85-f42864464e87",
"val": 3509215161
},
{
"key": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"val": 1331659605
}
]
},
"master_zonegroup": "1825aa39-e1d3-42ca-8cb6-3920335f2434",
"master_zone": "f608ce2b-5584-45af-b0c5-f4896995bd22",
"period_config": {
"bucket_quota": {
"enabled": false,
"max_size_kb": -1,
"max_objects": -1
},
"user_quota": {
"enabled": false,
"max_size_kb": -1,
"max_objects": -1
}
},
"realm_id": "41e5334a-0b66-4e39-bdad-e210c8ca8730",
"realm_name": "imusic",
"realm_epoch": 2
}

# Delete the default zone, zonegroup, and pools
radosgw-admin zonegroup remove --rgw-zonegroup=default --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zone delete --rgw-zone=default
radosgw-admin period update --commit
radosgw-admin zonegroup delete --rgw-zonegroup=default
radosgw-admin period update --commit

rados rmpool default.rgw.control default.rgw.control --yes-i-really-really-mean-it
rados rmpool default.rgw.meta default.rgw.meta --yes-i-really-really-mean-it
rados rmpool default.rgw.log default.rgw.log --yes-i-really-really-mean-it
rados rmpool default.rgw.data.root default.rgw.data.root --yes-i-really-really-mean-it
rados rmpool default.rgw.gc default.rgw.gc --yes-i-really-really-mean-it
rados rmpool default.rgw.users.keys default.rgw.users.keys --yes-i-really-really-mean-it


# Edit the configuration (the -02 and -03 sections need the same additions)

[client.rgw.xxy-2f201r-ceph-s3-c1-01]
host = xxy-2f201r-ceph-s3-c1-01
rgw_frontends = "civetweb port=80"
rgw_zone=imu-shxxy
rgw_dns_name = s3sh.imu.cn
rgw_content_length_compat = true

ceph-deploy --overwrite-conf config push xxy-2f201r-ceph-s3-c1-0{1..3}

systemctl restart ceph-radosgw@rgw.`hostname -s`
#

ERROR: after the restart, the slave gateways log the following:

2018-01-03 15:53:54.081272 7ffae4959700 0 -- 172.18.216.129:0/3441061439 submit_message mon_subscribe({osdmap=202}) v2 remote, 172.18.216.113:6789/0, failed lossy con, dropping message 0x7ffacc016320
2018-01-03 15:53:54.081304 7ffacbfff700 0 -- 172.18.216.129:0/1830369741 submit_message mon_subscribe({osdmap=202}) v2 remote, 172.18.216.113:6789/0, failed lossy con, dropping message 0x7ffabc0130f0
2018-01-03 15:53:54.084664 7ffacbfff700 0 monclient: hunting for new mon
2018-01-03 15:53:54.098603 7ffae4959700 0 monclient: hunting for new mon
2018-01-03 15:53:54.157355 7ff7b2e79700 1 rgw meta sync: epoch=0 in sync status comes before remote's oldest mdlog epoch=1, restarting sync
2018-01-03 15:53:54.167100 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:53:54.167116 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:53:55.257017 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:53:55.257032 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:53:57.336909 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:53:57.336925 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:54:01.404930 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:54:01.404946 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:54:02.123630 7ff7aba67700 1 ====== starting new request req=0x7ff7aba61710 =====
2018-01-03 15:54:03.285785 7ff7ab266700 1 ====== starting new request req=0x7ff7ab260710 =====
2018-01-03 15:54:03.786137 7ff7ab266700 1 ====== req done req=0x7ff7ab260710 op status=0 http_status=403 ======
2018-01-03 15:54:03.786178 7ff7aba67700 1 ====== req done req=0x7ff7aba61710 op status=0 http_status=403 ======
2018-01-03 15:54:03.786195 7ff7ab266700 1 civetweb: 0x7ffa3000b370: 172.18.52.241 - - [03/Jan/2018:15:54:03 +0800] "GET /admin/log HTTP/1.1" 403 0 - -
2018-01-03 15:54:03.786272 7ff7aba67700 1 civetweb: 0x7ffaac01de80: 172.18.52.241 - - [03/Jan/2018:15:54:02 +0800] "GET /admin/log HTTP/1.1" 403 0 - -

2018-01-03 15:54:09.456163 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:54:09.456177 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:54:22.011654 7ff7a725e700 1 ====== starting new request req=0x7ff7a7258710 =====
2018-01-03 15:54:22.015628 7ff7a725e700 1 ====== req done req=0x7ff7a7258710 op status=0 http_status=403 ======
2018-01-03 15:54:22.015697 7ff7a725e700 1 civetweb: 0x7ffaac0223c0: 172.18.52.242 - - [03/Jan/2018:15:54:22 +0800] "GET /admin/log HTTP/1.1" 403 0 - -
2018-01-03 15:54:25.508925 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:54:25.508941 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16

2018-01-03 15:54:41.801518 7ff7a2a55700 1 ====== starting new request req=0x7ff7a2a4f710 =====
2018-01-03 15:54:41.805426 7ff7a2a55700 1 ====== req done req=0x7ff7a2a4f710 op status=0 http_status=403 ======
2018-01-03 15:54:41.806707 7ff7a2254700 1 ====== starting new request req=0x7ff7a224e710 =====
2018-01-03 15:54:41.810124 7ff7a2254700 1 ====== req done req=0x7ff7a224e710 op status=0 http_status=403 ======
2018-01-03 15:54:41.834157 7ff7a2a55700 1 civetweb: 0x7ffaa40219c0: 172.18.52.242 - - [03/Jan/2018:15:54:41 +0800] "POST /admin/realm/period HTTP/1.1" 403 0 - -
2018-01-03 15:54:41.838756 7ff7a2254700 1 civetweb: 0x7ffaac0358e0: 172.18.52.241 - - [03/Jan/2018:15:54:41 +0800] "POST /admin/realm/period HTTP/1.1" 403 0 - -
2018-01-03 15:54:55.562206 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:54:55.562224 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
2018-01-03 15:55:02.011254 7ff79da4b700 1 ====== starting new request req=0x7ff79da45710 =====
2018-01-03 15:55:02.015640 7ff79da4b700 1 ====== req done req=0x7ff79da45710 op status=0 http_status=403 ======
2018-01-03 15:55:02.015709 7ff79da4b700 1 civetweb: 0x7ffaac048d90: 172.18.52.242 - - [03/Jan/2018:15:55:02 +0800] "GET /admin/log HTTP/1.1" 403 0 - -

2018-01-03 15:55:11.864108 7ff79aa45700 1 ====== starting new request req=0x7ff79aa3f710 =====
2018-01-03 15:55:11.868045 7ff79aa45700 1 ====== req done req=0x7ff79aa3f710 op status=0 http_status=403 ======
2018-01-03 15:55:11.868798 7ff79a244700 1 ====== starting new request req=0x7ff79a23e710 =====
2018-01-03 15:55:11.872069 7ff79a244700 1 ====== req done req=0x7ff79a23e710 op status=0 http_status=403 ======
2018-01-03 15:55:11.897109 7ff79aa45700 1 civetweb: 0x7ffaa404dfd0: 172.18.52.241 - - [03/Jan/2018:15:55:11 +0800] "POST /admin/realm/period HTTP/1.1" 403 0 - -
2018-01-03 15:55:11.900796 7ff79a244700 1 civetweb: 0x7ffaac053830: 172.18.52.241 - - [03/Jan/2018:15:55:11 +0800] "POST /admin/realm/period HTTP/1.1" 403 0 - -
2018-01-03 15:55:25.606730 7ff7b1c75700 0 ERROR: failed to take a lock on datalog.sync-status.f608ce2b-5584-45af-b0c5-f4896995bd22
2018-01-03 15:55:25.606744 7ff7b1c75700 0 ERROR: failed to init sync, retcode=-16
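
# With 403s on /admin/log and /admin/realm/period as above, the usual suspect
# is that the system user's keys are not identical in both zones; two hedged
# diagnostics (both are standard radosgw-admin subcommands):
radosgw-admin sync status
radosgw-admin zone get | grep -A 3 system_key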



# Appendix commands:

systemctl enable ceph-radosgw@rgw.`hostname -s`
systemctl restart ceph-radosgw@rgw.`hostname -s`
systemctl status ceph-radosgw@rgw.`hostname -s`





##

#radosgw-admin realm delete --rgw-realm=imusic
#radosgw-admin zone delete --rgw-zone=imu-gzsx


# Create custom pools and map them to a bucket placement target

@gzsx zone
ceph osd pool create imu-gzsx.rgw.buckets.video.data 512 512
ceph osd pool create imu-gzsx.rgw.buckets.video.index 64 64
ceph osd pool create imu-gzsx.rgw.buckets.video.extra 64 64

@shxxy zone
ceph osd pool create imu-shxxy.rgw.buckets.video.data 512 512
ceph osd pool create imu-shxxy.rgw.buckets.video.index 64 64
ceph osd pool create imu-shxxy.rgw.buckets.video.extra 64 64
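
# Confirm the new pools exist in each cluster before wiring them into the
# placement target:
ceph osd pool ls | grep video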


# Export the zonegroup and zone from each of the two zones:
# (the edit is shown below for one zone only)
radosgw-admin zonegroup get >zonegroup.conf.json
# Edit: add the new placement target
"placement_targets": [
{
"name": "default-placement",
"tags": []
},
{
"name": "video-placement",#增加local bucket
"tags": []
}
],
radosgw-admin zone get >zone.conf.json

# Edit: add the matching placement pools
"placement_pools": [
{
"key": "default-placement",
"val": {
"index_pool": "imu-gzsx.rgw.buckets.index",
"data_pool": "imu-gzsx.rgw.buckets.data",
"data_extra_pool": "imu-gzsx.rgw.buckets.non-ec",
"index_type": 0
}
},
{
"key": "My-bucket",
"val": {
"index_pool": "imu-gzsx.rgw.buckets.video.index",
"data_pool": "imu-gzsx.rgw.buckets.video.data",
"data_extra_pool": "imu-gzsx.rgw.buckets.video.extra",
"index_type": 0
}
}
],


# Apply the updates
radosgw-admin zonegroup set < zonegroup.conf.json
radosgw-admin zone set < zone.conf.json

# Commit the configuration
radosgw-admin period update --commit
# Restart on all nodes:
systemctl restart ceph-radosgw@rgw.`hostname -s`
systemctl status ceph-radosgw@rgw.`hostname -s`

# Create a bucket in the custom placement
s3cmd -c Ymliu.cfg mb s3://video --bucket-location=:video-placement
# Inspect

radosgw-admin bucket stats --bucket=video
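
# To confirm the test object really landed in the custom data pool (this is
# the "check rados" step noted below):
rados -p imu-gzsx.rgw.buckets.video.data ls | head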

Uploaded a 22 MB test file: success.
Checked the objects in rados: success.
