
h3. Crontab

<pre>
MAILTO="ceph-infra@redhat.com"
CEPH_QA_EMAIL="ceph-qa@ceph.com"
MACHINE_NAME="openstack"
FILTER_OUT="--filter-out=random.yaml\,async.yaml"

# Edit this file to introduce tasks to be run by cron.
#
# Each task to run has to be defined through a single line
# indicating with different fields when the task will be run
# and what command to run for the task
#
# To define the time you can provide concrete values for
# minute (m), hour (h), day of month (dom), month (mon),
# and day of week (dow) or use '*' in these fields (for 'any').
#
# Notice that tasks will be started based on the cron's system
# daemon's notion of time and timezones.
#
# Output of the crontab jobs (including errors) is sent through
# email to the user the crontab file belongs to (unless redirected).
#
# For example, you can run a backup of all your user accounts
# at 5 a.m every week with:
# 0 5 * * 1 tar -zcf /var/backups/home.tgz /home/
#
# For more information see the manual pages of crontab(5) and cron(8)
#
# m h dom mon dow command

PATH=/home/teuthology/src/teuthology_master/virtualenv/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
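# (the teuthology virtualenv comes first in PATH, so the teuthology-suite
# commands below resolve to the master checkout that the git pull jobs
# further down keep updated)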

# Ensure ceph-sepia-secrets is up-to-date
*/5 * * * * cd /home/teuthology/ceph-sepia-secrets && /home/teuthology/bin/cron_wrapper git pull
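# cron_wrapper is an internal helper; a minimal sketch of what it is assumed
# to do (hypothetical - the real script lives in /home/teuthology/bin):
# run the command, capture its output, and only emit it (triggering the
# MAILTO email) when the command fails:
#   #!/bin/sh
#   out=$(mktemp)
#   if ! "$@" >"$out" 2>&1; then cat "$out"; rm -f "$out"; exit 1; fi
#   rm -f "$out"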

# Publish this crontab to the Tracker page http://tracker.ceph.com/projects/ceph-releases/wiki/Ovh
00 6 * * * crontab=$(crontab -l | perl -p -e 's/&/&amp;/g; s/</&lt;/g; s/>/&gt;/g') ; header=$(echo h3. Crontab ; echo) ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary '<?xml version="1.0"?><wiki_page><text>'"$header"'<pre>'"$crontab"'</pre></text></wiki_page>' http://tracker.ceph.com/projects/ceph-releases/wiki/ovh.xml?key=$(cat /etc/redmine-key)
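# For reference, the PUT body generated above has this shape; note that
# $(...) strips the trailing newline from $header, so the published page
# starts with "h3. Crontab<pre>" fused on one line:
#   <?xml version="1.0"?><wiki_page><text>h3. Crontab<pre>...escaped
#   crontab text...</pre></text></wiki_page>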

# Publish this crontab to the Tracker page http://tracker.ceph.com/projects/ceph-releases/wiki/Sepia - THIS SHOULD RUN IN SEPIA
#00 6 * * * crontab=$(crontab -l | perl -p -e 's/&/&amp;/g; s/</&lt;/g; s/>/&gt;/g') ; header=$(echo h3. Crontab ; echo) ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary '<?xml version="1.0"?><wiki_page><text>'"$header"'<pre>'"$crontab"'</pre></text></wiki_page>' http://tracker.ceph.com/projects/ceph-releases/wiki/sepia.xml?key=$(cat /etc/redmine-key)

# Ensure teuthology is up-to-date
55 13 * * * cd /home/teuthology/src/teuthology_master && /home/teuthology/bin/cron_wrapper git pull
59 13 * * * cd /home/teuthology/src/ceph-qa-suite_master && /home/teuthology/bin/cron_wrapper git pull
# Ensure ceph-sepia-secrets is up-to-date (not needed in OVH)
#57 13 * * * cd /home/teuthology/ceph-sepia-secrets && git pull

# Powercycle tests - let's try it manually and get a green run before we add it to the nightlies
#00 20 * * 2,4 teuthology-suite -v -c giant -k testing -m plana,burnupi,mira -s powercycle

#********** master branch
00 5 * * * /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL ~/vps.yaml
10 5 */3 * * /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -k distro -s big -e $CEPH_QA_EMAIL ~/vps.yaml
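
# Quick flag reference for the teuthology-suite entries in this file:
# -v verbose, -c ceph branch, -m machine type, -s suite, -k kernel flavor
# (testing = ceph testing kernel, distro = stock distro kernel), -e results
# email, -p priority, -n N (--newest: fall back up to N commits to find a
# built revision), -t teuthology branch; the trailing ~/vps.yaml is an
# extra config fragment merged into every scheduled job.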

# --filter-out rbd_cli_tests.yaml while waiting for a fix for issue 14825
00 16 * * 1,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out rbd_cli_tests.yaml
#**** MOVED TO SEPIA b/c OVH issues START *****
#02 17 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
#04 18 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
#08 20 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
#10 21 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
#12 22 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
#14 23 * * 1,6 teuthology-suite -v -c master -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
#**** MOVED TO SEPIA b/c OVH issues END *****
06 19 * * 1,3 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
16 19 * * 1,3 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c master -n 7 -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
#removed per Greg's request 18 23 * * 1,3 teuthology-suite -v -c master -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
#*********

########################## infernalis ##################
### rados suite divided into 14 parts, 1 part run each day over the 2 weeks; the bc part does 0-6 on even weeks and 7-13 on odd
### % is escaped because it is a special character to cron
### format for filters: /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL $FILTER_OUT
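# Hypothetical sketch of schedule_rados.sh (the real script lives in
# /home/teuthology/bin; argument names and the bc expression here are
# assumptions reconstructed from the comments above):
#   #!/bin/sh
#   # usage: schedule_rados.sh DAY BRANCH MACHINE EMAIL [FILTERS...]
#   DAY=$1; BRANCH=$2; MACHINE=$3; EMAIL=$4; shift 4
#   # even ISO week -> parts 0-6, odd week -> parts 7-13 (the "bc part")
#   PART=$(echo "$DAY + $(date +%W) % 2 * 7" | bc)
#   teuthology-suite -v -c "$BRANCH" -m "$MACHINE" -s rados -e "$EMAIL" \
#       --subset "$PART/14" "$@"
# (% only needs the backslash escape when it appears directly on a crontab
# line; inside a script it is literal)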

## REMOVED per leads call 00 02 * * 1 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 2 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 1 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 3 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 2 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 4 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 3 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 5 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 4 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 6 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 5 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
## REMOVED per leads call 00 02 * * 7 /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_rados.sh 6 infernalis $MACHINE_NAME $CEPH_QA_EMAIL
##########################

# --filter-out rbd_cli_tests.yaml while waiting for a fix for issue 14825; qemu filtered out per Josh
## REMOVED per leads call 00 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out qemu
## REMOVED per leads call 00 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 05 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 10 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 15 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 20 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 25 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 30 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 35 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
#removed per Greg's request 18 23 * * 2 teuthology-suite -v -c infernalis -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
## REMOVED per leads call 40 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 45 22 * * 7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -m $MACHINE_NAME -k distro -s ceph-deploy -e $CEPH_QA_EMAIL ~/vps.yaml

########################## jewel ##################
### rados suite divided into 14 parts, 1 part run each day over the 2 weeks; the bc part does 0-6 on even weeks and 7-13 on odd
### % is escaped because it is a special character to cron
### format for filters: /home/teuthology/bin/schedule_rados.sh 0 infernalis $MACHINE_NAME $CEPH_QA_EMAIL $FILTER_OUT

#00 19 * * 1 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 0 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 2 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 1 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 3 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 2 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 4 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 3 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 5 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 4 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 6 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 5 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL
#00 19 * * 7 CEPH_BRANCH=jewel; /home/teuthology/bin/schedule_rados.sh 6 $CEPH_BRANCH $MACHINE_NAME $CEPH_QA_EMAIL

##########################

########################## jewel ##################

#00 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL
#02 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL
#04 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL
#06 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL
#08 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL
#10 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL
#12 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL
#14 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL
#16 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL
#removed per Greg's request 18 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
#20 23 * * 3,5 teuthology-suite -v -c jewel -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL
#10 23 * * 3,5 CEPH_BRANCH=master; MACHINE_NAME=vps; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k distro -p 70 -e $CEPH_QA_EMAIL

##### running off -t wip-13622-fix-wusui temporarily
##### ALL REMOVED EOL FIREFLY
#30 22 * * 1 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rados -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#00 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#02 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#04 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s fs -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#06 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#08 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#10 23 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s samba --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#20 16 * * 2 teuthology-suite -v -c firefly -m $MACHINE_NAME -k distro -s ceph-deploy --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui
#05 17 * * 2 teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/firefly --filter ubuntu_ -e $CEPH_QA_EMAIL -t wip-13622-fix-wusui

##########################
### rados suite divided into 14 parts, 1 part run each day over the 2 weeks

### temporarily running hammer rados on smithi per Sam's request, as we observe many ENOSPC failures in the OVH lab

#00 21 * * 1 /home/teuthology/bin/schedule_rados.sh 0 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#00 21 * * 2 /home/teuthology/bin/schedule_rados.sh 1 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#55 20 * * 3 /home/teuthology/bin/schedule_rados.sh 2 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#00 21 * * 4 /home/teuthology/bin/schedule_rados.sh 3 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#00 21 * * 5 /home/teuthology/bin/schedule_rados.sh 4 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#00 21 * * 6 /home/teuthology/bin/schedule_rados.sh 5 hammer $MACHINE_NAME $CEPH_QA_EMAIL
#00 21 * * 7 /home/teuthology/bin/schedule_rados.sh 6 hammer $MACHINE_NAME $CEPH_QA_EMAIL

# --filter-out rbd_cli_tests.yaml while waiting for a fix for issue 14825; qemu filtered out per Josh
00 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s rbd -e $CEPH_QA_EMAIL ~/vps.yaml --filter-out qemu
05 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s rgw -e $CEPH_QA_EMAIL ~/vps.yaml
10 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s fs -e $CEPH_QA_EMAIL ~/vps.yaml
15 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s krbd -e $CEPH_QA_EMAIL ~/vps.yaml
20 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s kcephfs -e $CEPH_QA_EMAIL ~/vps.yaml
25 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s knfs -e $CEPH_QA_EMAIL ~/vps.yaml
30 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL ~/vps.yaml
35 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL ~/vps.yaml
40 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -s rest -e $CEPH_QA_EMAIL ~/vps.yaml
#removed per Greg's request 18 18 * * 7 teuthology-suite -v -c hammer -m $MACHINE_NAME -k testing -s multimds -e $CEPH_QA_EMAIL
45 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s upgrade/client-upgrade -e $CEPH_QA_EMAIL ~/vps.yaml
50 19 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -m $MACHINE_NAME -k distro -s ceph-deploy -e $CEPH_QA_EMAIL ~/vps.yaml

######## UPGRADES
#STOPPED running 10 17 1 * * teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/dumpling-firefly-x --timeout 54000 -e $CEPH_QA_EMAIL ~/vps.yaml
05 17 * * 4 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/firefly-x -e $CEPH_QA_EMAIL ~/vps.yaml
15 17 * * 3,7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c hammer -k distro -m $MACHINE_NAME -s upgrade/hammer -e $CEPH_QA_EMAIL ~/vps.yaml

10 02 * * 1,3,5 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c jewel -k distro -m $MACHINE_NAME -s upgrade/hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
20 02 * * 1,3,5 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c jewel -k distro -m $MACHINE_NAME -s upgrade/infernalis-x -e $CEPH_QA_EMAIL ~/vps.yaml

#00 17 * * 2 teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/firefly -e $CEPH_QA_EMAIL
#13 19 * * 7 teuthology-suite -v -c firefly -k distro -m $MACHINE_NAME -s upgrade/dumpling-x -e $CEPH_QA_EMAIL

## REMOVED per leads call 20 17 * * 2,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 30 17 * * 2,6 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/firefly-hammer-x -e $CEPH_QA_EMAIL ~/vps.yaml
## REMOVED per leads call 40 17 * * 3,7 /home/teuthology/bin/cron_wrapper teuthology-suite -v -c infernalis -k distro -m $MACHINE_NAME -s upgrade/infernalis -e $CEPH_QA_EMAIL ~/vps.yaml
#Ansible: ceph-disk
#5 9 * * 2,4,6 #REMOVED teuthology-suite -v -c jewel -m openstack -k distro -s ceph-disk -e yweinste@redhat.com
</pre>