From 242153b4728e43c3f4af76888dd9ffefdf256ffa Mon Sep 17 00:00:00 2001
From: Jeremy Lecour
Date: Thu, 22 Aug 2019 14:50:20 +0200
Subject: [PATCH] Make local and sync tasks skippable.

---
 zzz_evobackup | 344 ++++++++++++++++++++++++++------------------------
 1 file changed, 178 insertions(+), 166 deletions(-)

diff --git a/zzz_evobackup b/zzz_evobackup
index 34e74f4..95cdf74 100755
--- a/zzz_evobackup
+++ b/zzz_evobackup
@@ -36,8 +36,14 @@ SYSTEM=$(uname | tr '[:upper:]' '[:lower:]')
 PIDFILE="/var/run/evobackup.pid"
 LOGFILE="/var/log/evobackup.log"
 
+## Enable/disable tasks (1 = enabled, 0 = disabled)
+LOCAL_TASKS=1
+SYNC_TASKS=1
+
 ##### SETUP AND FUNCTIONS #############################################
 
+BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")
+
 # shellcheck disable=SC2174
 mkdir -p -m 700 ${LOCAL_BACKUP_DIR}
 
@@ -121,146 +127,149 @@ trap "rm -f ${PIDFILE}" EXIT
 
 ##### LOCAL BACKUP ####################################################
 
-# You can comment or uncomment sections below to customize the backup
+if [ "${LOCAL_TASKS}" = "1" ]; then
+    # You can comment or uncomment sections below to customize the backup
 
-## OpenLDAP : example with slapcat
-# slapcat -l ${LOCAL_BACKUP_DIR}/ldap.bak
+    ## OpenLDAP : example with slapcat
+    # slapcat -l ${LOCAL_BACKUP_DIR}/ldap.bak
 
-### MySQL
+    ### MySQL
 
-## example with global and compressed mysqldump
-# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
-# --opt --all-databases --force --events --hex-blob | gzip --best > ${LOCAL_BACKUP_DIR}/mysql.bak.gz
+    ## example with a global and compressed mysqldump
+    # mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
+    # --opt --all-databases --force --events --hex-blob | gzip --best > ${LOCAL_BACKUP_DIR}/mysql.bak.gz
 
-## example with two dumps for each table (.sql/.txt) for all databases
-# for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
-# | egrep -v "^(Database|information_schema|performance_schema|sys)" ); \
-# do mkdir -p -m 700 /home/mysqldump/$i ; chown -RL mysql /home/mysqldump ; \
-# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
-# --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done
+    ## example with two dumps for each table (.sql/.txt) for all databases
+    # for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
+    # | egrep -v "^(Database|information_schema|performance_schema|sys)" ); \
+    # do mkdir -p -m 700 /home/mysqldump/$i ; chown -RL mysql /home/mysqldump ; \
+    # mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
+    # --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done
 
-## example with compressed SQL dump for each databases
-# mkdir -p -m 700 /home/mysqldump/
-# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
-# | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
-# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i | gzip --best > /home/mysqldump/${i}.sql.gz
-# done
+    ## example with a compressed SQL dump for each database
+    # mkdir -p -m 700 /home/mysqldump/
+    # for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
+    # | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
+    # mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i | gzip --best > /home/mysqldump/${i}.sql.gz
+    # done
 
-## example with *one* uncompressed SQL dump for *one* database (MYBASE)
-# mkdir -p -m 700 /home/mysqldump/MYBASE
-# chown -RL mysql /home/mysqldump/
-# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
-# --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE
+    ## example with *one* uncompressed SQL dump for *one* database (MYBASE)
+    # mkdir -p -m 700 /home/mysqldump/MYBASE
+    # chown -RL mysql /home/mysqldump/
+    # mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
+    # --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE
 
-## example with mysqlhotcopy
-# mkdir -p -m 700 /home/mysqlhotcopy/
-# mysqlhotcopy BASE /home/mysqlhotcopy/
+    ## example with mysqlhotcopy
+    # mkdir -p -m 700 /home/mysqlhotcopy/
+    # mysqlhotcopy BASE /home/mysqlhotcopy/
 
-## example for multiples MySQL instances
-# mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3)
-# grep -E "^port\s*=\s*\d*" /etc/mysql/my.cnf |while read instance; do
-# instance=$(echo "$instance"|awk '{ print $3 }')
-# if [ "$instance" != "3306" ]
-# then
-# mysqldump -P $instance --opt --all-databases --hex-blob -u mysqladmin -p$mysqladminpasswd > ${LOCAL_BACKUP_DIR}/mysql.$instance.bak
-# fi
-# done
+    ## example for multiple MySQL instances
+    # mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3)
+    # grep -E "^port\s*=\s*\d*" /etc/mysql/my.cnf |while read instance; do
+    # instance=$(echo "$instance"|awk '{ print $3 }')
+    # if [ "$instance" != "3306" ]
+    # then
+    # mysqldump -P $instance --opt --all-databases --hex-blob -u mysqladmin -p$mysqladminpasswd > ${LOCAL_BACKUP_DIR}/mysql.$instance.bak
+    # fi
+    # done
 
-### PostgreSQL
+    ### PostgreSQL
 
-## example with pg_dumpall (warning: you need space in ~postgres)
-# su - postgres -c "pg_dumpall > ~/pg.dump.bak"
-# mv ~postgres/pg.dump.bak ${LOCAL_BACKUP_DIR}/
-## another method with gzip directly piped
-# cd /var/lib/postgresql
-# sudo -u postgres pg_dumpall | gzip > ${LOCAL_BACKUP_DIR}/pg.dump.bak.gz
-# cd - > /dev/null
+    ## example with pg_dumpall (warning: you need enough space in ~postgres)
+    # su - postgres -c "pg_dumpall > ~/pg.dump.bak"
+    # mv ~postgres/pg.dump.bak ${LOCAL_BACKUP_DIR}/
+    ## another method, with gzip directly piped
+    # cd /var/lib/postgresql
+    # sudo -u postgres pg_dumpall | gzip > ${LOCAL_BACKUP_DIR}/pg.dump.bak.gz
+    # cd - > /dev/null
 
-## example with all tables from MYBASE excepts TABLE1 and TABLE2
-# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE
+    ## example with only TABLE1 and TABLE2 from MYBASE (-t selects tables)
+    # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE
 
-## example with only TABLE1 and TABLE2 from MYBASE
-# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE
+    ## example with all tables from MYBASE except TABLE1 and TABLE2 (-T excludes tables)
+    # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE
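
+    ## another possibility (a sketch, not in the original script): a custom-format dump,
+    ## compressed by default and restorable table by table with pg_restore;
+    ## adjust USER and MYBASE to your setup
+    # pg_dump -p 5432 -h 127.0.0.1 -U USER -F c -f ${LOCAL_BACKUP_DIR}/MYBASE.dump MYBASE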
 
-## MongoDB : example with mongodump
-## don't forget to create use with read-only access
-## > use admin
-## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
-# test -d ${LOCAL_BACKUP_DIR}/mongodump/ && rm -rf ${LOCAL_BACKUP_DIR}/mongodump/
-# mkdir -p -m 700 ${LOCAL_BACKUP_DIR}/mongodump/
-# mongodump --quiet -u mongobackup -pPASS -o ${LOCAL_BACKUP_DIR}/mongodump/
-# if [ $? -ne 0 ]; then
-# echo "Error with mongodump!"
-# fi
+    ## MongoDB : example with mongodump
+    ## don't forget to create a user with read-only access
+    ## > use admin
+    ## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
+    # test -d ${LOCAL_BACKUP_DIR}/mongodump/ && rm -rf ${LOCAL_BACKUP_DIR}/mongodump/
+    # mkdir -p -m 700 ${LOCAL_BACKUP_DIR}/mongodump/
+    # mongodump --quiet -u mongobackup -pPASS -o ${LOCAL_BACKUP_DIR}/mongodump/
+    # if [ $? -ne 0 ]; then
+    #     echo "Error with mongodump!"
+    # fi
 
-## Redis : example with copy .rdb file
-# cp /var/lib/redis/dump.rdb ${LOCAL_BACKUP_DIR}/
+    ## Redis : example with a copy of the .rdb file
+    # cp /var/lib/redis/dump.rdb ${LOCAL_BACKUP_DIR}/
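
+    ## possible refinement (a sketch, not in the original script; assumes a local
+    ## instance and redis-cli in PATH): trigger a fresh snapshot before copying the
+    ## file. BGSAVE is asynchronous, so the sleep is only a crude wait.
+    # redis-cli BGSAVE
+    # sleep 10
+    # cp /var/lib/redis/dump.rdb ${LOCAL_BACKUP_DIR}/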
 
-## ElasticSearch, take a snapshot as a backup.
-## Warning: You need to have a path.repo configured.
-## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes
-# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
-# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
-## Clustered version here
-## It basically the same thing except that you need to check that NFS is mounted
-# if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2'
-# then
-# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
-# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
-# else
-# echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.'
-# fi
-## If you need to keep older snapshot, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines by :
-# for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do
-# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}'
-# done
-# date=$(date +%F)
-# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log
+    ## ElasticSearch, take a snapshot as a backup.
+    ## Warning: You need to have a path.repo configured.
+    ## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes
+    # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
+    # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
+    ## Clustered version here
+    ## It's basically the same thing, except that you need to check that NFS is mounted
+    # if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2'
+    # then
+    #     curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
+    #     curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
+    # else
+    #     echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.'
+    # fi
+    ## If you need to keep older snapshots, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines with:
+    # for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do
+    #     curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}'
+    # done
+    # date=$(date +%F)
+    # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log
 
-## RabbitMQ : export config
-#rabbitmqadmin export ${LOCAL_BACKUP_DIR}/rabbitmq.config >> $LOGFILE
+    ## RabbitMQ : export config
+    #rabbitmqadmin export ${LOCAL_BACKUP_DIR}/rabbitmq.config >> $LOGFILE
 
-# backup MegaCli config
-#megacli -CfgSave -f ${LOCAL_BACKUP_DIR}/megacli_conf.dump -a0 >/dev/null
+    ## Backup the MegaCli config
+    #megacli -CfgSave -f ${LOCAL_BACKUP_DIR}/megacli_conf.dump -a0 >/dev/null
 
-## Dump system and kernel versions
-uname -a > ${LOCAL_BACKUP_DIR}/uname
+    ## Dump system and kernel versions
+    uname -a > ${LOCAL_BACKUP_DIR}/uname
 
-## Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls)
-for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
-    mtr -r ${addr} > ${LOCAL_BACKUP_DIR}/mtr-${addr}
-    traceroute -n ${addr} > ${LOCAL_BACKUP_DIR}/traceroute-${addr} 2>&1
-done
-
-## Dump process with ps
-ps auwwx >${LOCAL_BACKUP_DIR}/ps.out
-
-if [ "${SYSTEM}" = "linux" ]; then
-    ## Dump network connections with ss
-    ss -taupen > ${LOCAL_BACKUP_DIR}/netstat.out
-
-    ## List Debian packages
-    dpkg -l > ${LOCAL_BACKUP_DIR}/packages
-    dpkg --get-selections > ${LOCAL_BACKUP_DIR}/packages.getselections
-    apt-cache dumpavail > ${LOCAL_BACKUP_DIR}/packages.available
-
-    ## Dump MBR / table partitions
-    disks=$(find /dev/ -regex '/dev/\([sv]d[a-z]\|nvme[0-9]+n[0-9]+\)')
-    for disk in ${disks}; do
-        name=$(basename "${disk}")
-        dd if="${disk}" of="${LOCAL_BACKUP_DIR}/MBR-${name}" bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)"
-        fdisk -l "${disk}" > "${LOCAL_BACKUP_DIR}/partitions-${name}"
-    done
-    cat ${LOCAL_BACKUP_DIR}/partitions-* > ${LOCAL_BACKUP_DIR}/partitions
-else
-    ## Dump network connections with netstat
-    netstat -finet -atn > ${LOCAL_BACKUP_DIR}/netstat.out
-
-    ## List OpenBSD packages
-    pkg_info -m > ${LOCAL_BACKUP_DIR}/packages
+    ## Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls)
+    for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
+        mtr -r ${addr} > ${LOCAL_BACKUP_DIR}/mtr-${addr}
+        traceroute -n ${addr} > ${LOCAL_BACKUP_DIR}/traceroute-${addr} 2>&1
+    done
+
+    ## Dump processes with ps
+    ps auwwx >${LOCAL_BACKUP_DIR}/ps.out
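+
+    ## Optionally, dump mounted filesystems and disk usage too (a small addition,
+    ## not in the original script; uncomment if useful)
+    # mount > ${LOCAL_BACKUP_DIR}/mount.out
+    # df -h > ${LOCAL_BACKUP_DIR}/df.out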
+
+    if [ "${SYSTEM}" = "linux" ]; then
+        ## Dump network connections with ss
+        ss -taupen > ${LOCAL_BACKUP_DIR}/netstat.out
+
+        ## List Debian packages
+        dpkg -l > ${LOCAL_BACKUP_DIR}/packages
+        dpkg --get-selections > ${LOCAL_BACKUP_DIR}/packages.getselections
+        apt-cache dumpavail > ${LOCAL_BACKUP_DIR}/packages.available
+
+        ## Dump MBR / table partitions
+        disks=$(find /dev/ -regex '/dev/\([sv]d[a-z]\|nvme[0-9]+n[0-9]+\)')
+        for disk in ${disks}; do
+            name=$(basename "${disk}")
+            dd if="${disk}" of="${LOCAL_BACKUP_DIR}/MBR-${name}" bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)"
+            fdisk -l "${disk}" > "${LOCAL_BACKUP_DIR}/partitions-${name}"
+        done
+        cat ${LOCAL_BACKUP_DIR}/partitions-* > ${LOCAL_BACKUP_DIR}/partitions
+    else
+        ## Dump network connections with netstat
+        netstat -finet -atn > ${LOCAL_BACKUP_DIR}/netstat.out
+
+        ## List OpenBSD packages
+        pkg_info -m > ${LOCAL_BACKUP_DIR}/packages
+
+        ## Dump MBR / table partitions
+        ##disklabel sd0 > ${LOCAL_BACKUP_DIR}/partitions
+    fi
-    ## Dump MBR / table partitions
-    ##disklabel sd0 > ${LOCAL_BACKUP_DIR}/partitions
 fi
 
 ##### REMOTE BACKUP ###################################################
 
@@ -284,66 +293,69 @@ SSH_PORT=$(echo "${server}" | cut -d':' -f2)
 
 HOSTNAME=$(hostname)
 
-BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")
-
 if [ "${SYSTEM}" = "linux" ]; then
     rep="/bin /boot /lib /opt /sbin /usr"
 else
     rep="/bsd /bin /sbin /usr"
 fi
 
-# /!\ DO NOT USE COMMENTS in the rsync command /!\
-# It breaks the command and destroys data, simply remove (or add) lines.
-rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
-    --exclude "lost+found" \
-    --exclude ".nfs.*" \
-    --exclude "/var/log" \
-    --exclude "/var/log/evobackup*" \
-    --exclude "/var/lib/mysql" \
-    --exclude "/var/lib/postgres" \
-    --exclude "/var/lib/postgresql" \
-    --exclude "/var/lib/sympa" \
-    --exclude "/var/lib/metche" \
-    --exclude "/var/run" \
-    --exclude "/var/lock" \
-    --exclude "/var/state" \
-    --exclude "/var/apt" \
-    --exclude "/var/cache" \
-    --exclude "/usr/src" \
-    --exclude "/usr/doc" \
-    --exclude "/usr/share/doc" \
-    --exclude "/usr/obj" \
-    --exclude "dev" \
-    --exclude "/var/spool/postfix" \
-    --exclude "/var/lib/amavis/amavisd.sock" \
-    --exclude "/var/lib/munin/*tmp*" \
-    --exclude "/var/lib/php5" \
-    --exclude "/var/spool/squid" \
-    --exclude "/var/lib/elasticsearch" \
-    --exclude "/var/lib/amavis/tmp" \
-    --exclude "/var/lib/clamav/*.tmp" \
-    --exclude "/home/mysqltmp" \
-    --exclude "/var/lib/php/sessions" \
-    ${rep} \
-    /etc \
-    /root \
-    /var \
-    /home \
-    /srv \
-    -e "ssh -p ${SSH_PORT}" \
-    "root@${SSH_SERVER}:/var/backup/" \
-    | tail -30 >> $LOGFILE
+if [ "${SYNC_TASKS}" = "1" ]; then
+    # /!\ DO NOT USE COMMENTS in the rsync command /!\
+    # It breaks the command and destroys data; simply remove (or add) lines.
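+    # Tip (not in the original script): when adjusting the exclude list, preview the
+    # transfer by running the same rsync command with --dry-run first.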
-END=$(/bin/date +"%d-%m-%Y ; %H:%M")
+    rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
+        --exclude "lost+found" \
+        --exclude ".nfs.*" \
+        --exclude "/var/log" \
+        --exclude "/var/log/evobackup*" \
+        --exclude "/var/lib/mysql" \
+        --exclude "/var/lib/postgres" \
+        --exclude "/var/lib/postgresql" \
+        --exclude "/var/lib/sympa" \
+        --exclude "/var/lib/metche" \
+        --exclude "/var/run" \
+        --exclude "/var/lock" \
+        --exclude "/var/state" \
+        --exclude "/var/apt" \
+        --exclude "/var/cache" \
+        --exclude "/usr/src" \
+        --exclude "/usr/doc" \
+        --exclude "/usr/share/doc" \
+        --exclude "/usr/obj" \
+        --exclude "dev" \
+        --exclude "/var/spool/postfix" \
+        --exclude "/var/lib/amavis/amavisd.sock" \
+        --exclude "/var/lib/munin/*tmp*" \
+        --exclude "/var/lib/php5" \
+        --exclude "/var/spool/squid" \
+        --exclude "/var/lib/elasticsearch" \
+        --exclude "/var/lib/amavis/tmp" \
+        --exclude "/var/lib/clamav/*.tmp" \
+        --exclude "/home/mysqltmp" \
+        --exclude "/var/lib/php/sessions" \
+        ${rep} \
+        /etc \
+        /root \
+        /var \
+        /home \
+        /srv \
+        -e "ssh -p ${SSH_PORT}" \
+        "root@${SSH_SERVER}:/var/backup/" \
+        | tail -30 >> $LOGFILE
+fi
 
 ##### REPORTING #######################################################
 
-printf "EvoBackup - %s - START %s ON %s\n" "${HOSTNAME}" "${BEGINNING}" "${SSH_SERVER}" \
-    >> $LOGFILE
+END=$(/bin/date +"%d-%m-%Y ; %H:%M")
 
-printf "EvoBackup - %s - STOP %s ON %s\n" "${HOSTNAME}" "${END}" "${SSH_SERVER}" \
-    >> $LOGFILE
+printf "EvoBackup - %s - START %s ON %s (LOCAL_TASKS=%s SYNC_TASKS=%s)\n" \
+    "${HOSTNAME}" "${BEGINNING}" "${SSH_SERVER}" "${LOCAL_TASKS}" "${SYNC_TASKS}" \
+    >> $LOGFILE
+
+printf "EvoBackup - %s - STOP %s ON %s (LOCAL_TASKS=%s SYNC_TASKS=%s)\n" \
+    "${HOSTNAME}" "${END}" "${SSH_SERVER}" "${LOCAL_TASKS}" "${SYNC_TASKS}" \
+    >> $LOGFILE
 
 tail -10 $LOGFILE | \
     mail -s "[info] EvoBackup - Client ${HOSTNAME}" \