Project

General

Profile

Ovh » History » Version 31

Cron Tab, 07/19/2016 06:00 AM

1 29 Cron Tab
h3. Crontab<pre>MAILTO="ceph-infra@redhat.com"
2
CEPH_QA_EMAIL="ceph-qa@ceph.com"
3
MACHINE_NAME="openstack"
4
FILTER_OUT="--filter-out=random.yaml\,async.yaml"
5
6
# Edit this file to introduce tasks to be run by cron.
7
# 
8
# Each task to run has to be defined through a single line
9
# indicating with different fields when the task will be run
10
# and what command to run for the task
11
# 
12
# To define the time you can provide concrete values for
13
# minute (m), hour (h), day of month (dom), month (mon),
14
# and day of week (dow) or use '*' in these fields (for 'any').
# 
15
# Notice that tasks will be started based on the cron's system
16
# daemon's notion of time and timezones.
17
# 
18
# Output of the crontab jobs (including errors) is sent through
19
# email to the user the crontab file belongs to (unless redirected).
20
# 
21
# For example, you can run a backup of all your user accounts
22
# at 5 a.m every week with:
23
# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
24
# 
25
# For more information see the manual pages of crontab(5) and cron(8)
26
# 
27
# m h  dom mon dow   command
28
29
PATH=/home/teuthology/src/teuthology_master/virtualenv/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
30
31
# Ensure ceph-sepia-secrets is up-to-date
32 30 Cron Tab
*/5 * * * *   cd /home/teuthology/ceph-sepia-secrets && /home/teuthology/bin/cron_wrapper git pull
33 29 Cron Tab
34
35
#Publish this crontab to the Tracker page http://tracker.ceph.com/projects/ceph-releases/wiki/Ovh
36
00 6 * * *  crontab=$(crontab -l | perl -p -e 's/&/&amp;/g; s/</&lt;/g; s/>/&gt;/g') ; header=$(echo h3. Crontab ; echo) ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary '<?xml version="1.0"?><wiki_page><text>'"$header"'<pre>'"$crontab"'</pre></text></wiki_page>' http://tracker.ceph.com/projects/ceph-releases/wiki/ovh.xml?key=$(cat /etc/redmine-key)
37
38
#Publish this crontab to the Tracker page http://tracker.ceph.com/projects/ceph-releases/wiki/Sepia THIS SHOULD RUN IN SEPIA
39
#00 6 * * *  crontab=$(crontab -l | perl -p -e 's/&/&amp;/g; s/</&lt;/g; s/>/&gt;/g') ; header=$(echo h3. Crontab ; echo) ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary '<?xml version="1.0"?><wiki_page><text>'"$header"'<pre>'"$crontab"'</pre></text></wiki_page>' http://tracker.ceph.com/projects/ceph-releases/wiki/sepia.xml?key=$(cat /etc/redmine-key)
40
41
42
# Ensure teuthology is up-to-date
43
55 13 * * *   cd /home/teuthology/src/teuthology_master && /home/teuthology/bin/cron_wrapper git pull
44
59 13 * * *   cd /home/teuthology/src/ceph-qa-suite_master && /home/teuthology/bin/cron_wrapper git pull
45
# Ensure ceph-sepia-secrets is up-to-date not needed in ovh
46
#57 13 * * *   cd /home/teuthology/ceph-sepia-secrets && git pull
47
48
#Powercycle tests - lets try it manually and get a green before we add it to the nightlies
49
#00 20 * * 2,4 teuthology-suite -v -c giant -k testing -m plana,burnupi,mira -s powercycle
50
51
#********** master branch
52 31 Cron Tab
00 5 * * *   /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL ~/vps.yaml
53
10 5 */3 * * /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -k distro -s big -e $CEPH_QA_EMAIL ~/vps.yaml
54 29 Cron Tab
55
#--filter-out rbd_cli_tests.yaml while waiting for a fix issue 14825
56 31 Cron Tab
00 16 * * 1,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out rbd_cli_tests.yaml
57 29 Cron Tab
#**** MOVED TO SEPIA b/c OVH issues START *****
58
#02 17 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
59
#04 18 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
60
#08 20 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
61
#10 21 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
62
#12 22 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
63
#14 23 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
64
#**** MOVED TO SEPIA b/c OVH issues END *****
65 31 Cron Tab
06 19 * * 1,3 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
66
16 19 * * 1,3 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
67 29 Cron Tab
#removed per Greg's request 18 23 * * 1,3 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
68
#*********
69
70
71
########################## infernalis ##################
72
### rados suite divided into 14 parts 1 part every day of the 2 weeks, the bc part does 0-6 on even weeks and 7-13 on odd
73
### % is escaped because it is a special character to cron
74
### format for filters /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL $FILTER_OUT
75
76
77
00 02 * * 1 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
78
00 02 * * 2 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 1 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
79
00 02 * * 3 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 2 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
80
00 02 * * 4 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 3 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
81
00 02 * * 5 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 4 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
82
00 02 * * 6 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 5 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
83
00 02 * * 7 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 6 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
84
##########################
85
86
#--filter-out rbd_cli_tests.yaml while waiting for a fix issue 14825, qemu per Josh
87
00 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out qemu
88
00 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
89
05 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
90
10 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
91
15 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
92
20 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
93
25 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
94
30 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
95
35 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
96
#removed per Greg's request 18 23 * * 2 teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
97
40 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL ~/vps.yaml
98
45 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s ceph-deploy -e $CEPH_QA_EMAIL ~/vps.yaml
99
100
########################## jewel ##################
101
### rados suite divided into 14 parts 1 part every day of the 2 weeks, the bc part does 0-6 on even weeks and 7-13 on odd
102
### % is escaped because it is a special character to cron
103
### format for filters /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL $FILTER_OUT
104
105
106
#00 19 * * 1 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 0 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
107
#00 19 * * 2 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 1 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
108
#00 19 * * 3 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 2 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
109
#00 19 * * 4 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 3 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
110
#00 19 * * 5 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 4 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
111
#00 19 * * 6 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 5 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
112
#00 19 * * 7 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 6 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
113
114
##########################
115
116
########################## jewel ##################
117
118
#00 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL
119
#02 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL
120
#04 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL
121
#06 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL
122
#08 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL
123
#10 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL
124
#12 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL
125
#14 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL
126
#16 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL
127
#removed per Greg's request 18 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
128
#20 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL
129
#10 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s ceph-deploy -e $CEPH_QA_EMAIL
130
131
##### running off -t wip-13622-fix-wusui temporarily
132
##### ALL REMOVED EOL FIREFLY
133
#30 22 * * 1   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rados -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
134
#00 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
135
#02 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
136
#04 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s fs -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
137
#06 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
138
#08 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
139
#10 23 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s samba --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
140
#20 16 * * 2   teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s ceph-deploy --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
141
#05 17 * * 2   teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/firefly --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
142
143
##########################
144
### rados suite divided into 14 parts 1 part every day of the 2 weeks
145
146
### temporary running hammer rados on smithi per Sam's request as we observe many ENOSPC failures in ovh lab
147
148
#00 21 * * 1 /home/teuthology/bin/schedule_rados.sh 0 hammer $MACHINE_NAME $CEPH_QA_EMAIL
149
#00 21 * * 2 /home/teuthology/bin/schedule_rados.sh 1 hammer $MACHINE_NAME $CEPH_QA_EMAIL
150
#55 20 * * 3 /home/teuthology/bin/schedule_rados.sh 2 hammer $MACHINE_NAME $CEPH_QA_EMAIL
151
#00 21 * * 4 /home/teuthology/bin/schedule_rados.sh 3 hammer $MACHINE_NAME $CEPH_QA_EMAIL
152
#00 21 * * 5 /home/teuthology/bin/schedule_rados.sh 4 hammer $MACHINE_NAME $CEPH_QA_EMAIL
153
#00 21 * * 6 /home/teuthology/bin/schedule_rados.sh 5 hammer $MACHINE_NAME $CEPH_QA_EMAIL
154
#00 21 * * 7 /home/teuthology/bin/schedule_rados.sh 6 hammer $MACHINE_NAME $CEPH_QA_EMAIL
155
156
#--filter-out rbd_cli_tests.yaml while waiting for a fix issue 14825, qemu per Josh 
157
00 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out qemu
158
05 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
159
10 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
160
15 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
161
20 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
162
25 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
163
30 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
164
35 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
165
40 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
166
#removed per Greg's request 18 18 * * 7 teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
167
45 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL ~/vps.yaml
168
50 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s ceph-deploy -e $CEPH_QA_EMAIL ~/vps.yaml
169
170
######## UPGRADES
171
#STOPPED running 10 17 1 * *     teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/dumpling-firefly-x --timeout 54000 -e $CEPH_QA_EMAIL ~/vps.yaml
172
05 17 * * 4     /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/firefly-x -e $CEPH_QA_EMAIL ~/vps.yaml
173
15 17 * * 3,7   /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/hammer -e $CEPH_QA_EMAIL ~/vps.yaml
174
175
10 02 * * 1,3,5 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c jewel -k distro -m $MACHINE_NAME -s upgrade/hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
176
20 02 * * 1,3,5 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c jewel -k distro -m $MACHINE_NAME -s upgrade/infernalis-x -e $CEPH_QA_EMAIL ~/vps.yaml
177
178
#00 17 * * 2     teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/firefly -e $CEPH_QA_EMAIL
179
#13 19 * * 7     teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/dumpling-x -e $CEPH_QA_EMAIL
180
181
20 17 * * 2,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
182
30 17 * * 2,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/firefly-hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
183
40 17 * * 3,7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/infernalis -e $CEPH_QA_EMAIL ~/vps.yaml
184
#Ansible: ceph-disk
185
#5 9 * * 2,4,6 #REMOVED teuthology-suite -v -c jewel -m openstack -k distro -s ceph-disk -e yweinste@redhat.com</pre>