#!/bin/sh
#
# Script Evobackup client
# See https://gitea.evolix.org/evolix/evobackup
#
# Author: Gregory Colpart <reg@evolix.fr>
# Contributors:
# Romain Dessort <rdessort@evolix.fr>
# Benoît Série <bserie@evolix.fr>
# Tristan Pilat <tpilat@evolix.fr>
# Victor Laborie <vlaborie@evolix.fr>
# Jérémy Lecour <jlecour@evolix.fr>
#
# Licence: AGPLv3
#
# /!\ DON'T FORGET TO SET "MAIL" and "SERVERS" VARIABLES
##### Configuration ###################################################
# email address for notifications
MAIL=jdoe@example.com
# list of hosts (hostname or IP) and SSH port for Rsync
SERVERS="node0.backup.example.com:2XXX node1.backup.example.com:2XXX"
# timeout (in seconds) for the SSH test
SSH_CONNECT_TIMEOUT=30
# You can set "linux" or "bsd" manually or let it choose automatically
SYSTEM=$(uname | tr '[:upper:]' '[:lower:]')
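## Example (uncomment to force the value instead of relying on auto-detection):
# SYSTEM="linux"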
##### SETUP AND FUNCTIONS #############################################
# shellcheck disable=SC2174
mkdir -p -m 700 /home/backup
PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin
## lang = C for English output
export LANGUAGE=C
export LANG=C
## Force umask
umask 077
# Call test_server with "HOST:PORT" string
# It will return with 0 if the server is reachable.
# It will return with 1 and a message on stderr if not.
test_server() {
    item=$1
    # split HOST and PORT from the input string
    host=$(echo "${item}" | cut -d':' -f1)
    port=$(echo "${item}" | cut -d':' -f2)
    # Test if the server is accepting connections
    ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" "${host}" -p "${port}" -t "exit"
    # shellcheck disable=SC2181
    if [ $? = 0 ]; then
        # SSH connection is OK
        return 0
    else
        # SSH connection failed
        echo "Failed to connect to \`${item}' within ${SSH_CONNECT_TIMEOUT} seconds" >&2
        return 1
    fi
}
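## Usage sketch (hypothetical host and port):
# if test_server "node0.backup.example.com:2222"; then
#     echo "node0 is reachable"
# fi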
# Call pick_server with an optional positive integer to get the nth server in the list.
pick_server() {
    increment=${1:-0}
    list_length=$(echo "${SERVERS}" | wc -w)
    if [ "${increment}" -ge "${list_length}" ]; then
        # We've reached the end of the list
        echo "No more server available" >&2
        return 1
    fi
    # A salt is useful to randomize the starting point in the list
    # but stays identical each time it's called on a given server (based on the hostname).
    salt=$(hostname | cksum | cut -d' ' -f1)
    # Pick an integer between 0 and (list length - 1)
    # It changes each day
    today=10#$(date +%d)
    item=$(( ($today + salt + increment) % list_length ))
    # cut starts counting fields at 1, not 0.
    field=$(( item + 1 ))
    echo "${SERVERS}" | cut -d' ' -f${field}
}
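## Worked example (illustration, assuming two servers and a salt of 1234):
## on the 5th day of the month, (5 + 1234 + 0) % 2 = 1, so field 2 is picked,
## i.e. the second server; the next day it rotates back to the first one.
# pick_server 0   # first candidate for today
# pick_server 1   # fallback candidate if the first one is unreachable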
## Check for another running evobackup process and kill it if needed
PIDFILE=/var/run/evobackup.pid
if [ -e $PIDFILE ]; then
    pid=$(cat "$PIDFILE")
    # Kill the children of evobackup.
    for ppid in $(ps h --ppid "$pid" -o pid | tr -s '\n' ' '); do
        kill -9 "$ppid";
    done
    # Then kill the main PID.
    kill -9 "$pid"
    echo "$0 is still running (PID $pid). Process has been killed" >&2
fi
echo "$$" > $PIDFILE
# shellcheck disable=SC2064
trap "rm -f $PIDFILE" EXIT
##### LOCAL BACKUP ####################################################
# You can comment or uncomment sections below to customize the backup
## OpenLDAP : example with slapcat
# slapcat -l /home/backup/ldap.bak
### MySQL
## example with global and compressed mysqldump
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
# --opt --all-databases --force --events --hex-blob | gzip --best > /home/backup/mysql.bak.gz
## example with two dumps for each table (.sql/.txt) for all databases
# for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
# | egrep -v "^(Database|information_schema|performance_schema|sys)" ); \
# do mkdir -p -m 700 /home/mysqldump/$i ; chown -RL mysql /home/mysqldump ; \
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
# --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done
## example with a compressed SQL dump for each database
# mkdir -p -m 700 /home/mysqldump/
# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
# | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i | gzip --best > /home/mysqldump/${i}.sql.gz
# done
## example with *one* uncompressed SQL dump for *one* database (MYBASE)
# mkdir -p -m 700 /home/mysqldump/MYBASE
# chown -RL mysql /home/mysqldump/
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
# --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE
## example with mysqlhotcopy
# mkdir -p -m 700 /home/mysqlhotcopy/
# mysqlhotcopy BASE /home/mysqlhotcopy/
## example for multiple MySQL instances
# mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf | cut -d" " -f3)
# grep -E "^port\s*=\s*[0-9]+" /etc/mysql/my.cnf | while read instance; do
#     instance=$(echo "$instance" | awk '{ print $3 }')
#     if [ "$instance" != "3306" ]
#     then
#         mysqldump -P $instance --opt --all-databases --hex-blob -u mysqladmin -p$mysqladminpasswd > /home/backup/mysql.$instance.bak
#     fi
# done
### PostgreSQL
## example with pg_dumpall (warning: you need enough free space in ~postgres)
# su - postgres -c "pg_dumpall > ~/pg.dump.bak"
# mv ~postgres/pg.dump.bak /home/backup/
## another method with gzip directly piped
# cd /var/lib/postgresql
# sudo -u postgres pg_dumpall | gzip > /home/backup/pg.dump.bak.gz
# cd - > /dev/null
## example with all tables from MYBASE except TABLE1 and TABLE2
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE
## example with only TABLE1 and TABLE2 from MYBASE
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE
## MongoDB : example with mongodump
## don't forget to create a user with read-only access
## > use admin
## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
# test -d /home/backup/mongodump/ && rm -rf /home/backup/mongodump/
# mkdir -p -m 700 /home/backup/mongodump/
# mongodump --quiet -u mongobackup -pPASS -o /home/backup/mongodump/
# if [ $? -ne 0 ]; then
# echo "Error with mongodump!"
# fi
## Redis : example with copy .rdb file
# cp /var/lib/redis/dump.rdb /home/backup/
## Elasticsearch: take a snapshot as a backup.
## Warning: You need to have a path.repo configured.
## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes
# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
## Clustered version here
## It's basically the same thing, except that you need to check that NFS is mounted
# if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2'
# then
# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
# else
# echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.'
# fi
## If you need to keep older snapshots, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines with:
# for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do
# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}'
# done
# date=$(date +%F)
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log
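## To list the snapshots currently stored in the repository (sketch, assumes the _cat API is available):
# curl -s -XGET "localhost:9200/_cat/snapshots/snaprepo?v"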
## RabbitMQ : export config
#rabbitmqadmin export /home/backup/rabbitmq.config >> /var/log/evobackup.log
## Dump the MBR and partition tables with dd and fdisk
## Linux
#for disk in $(ls /dev/[sv]d[a-z] 2>/dev/null); do
# name=$(basename $disk)
# dd if=$disk of=/home/backup/MBR-$name bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)"
# fdisk -l $disk > /home/backup/partitions-$name
#done
#cat /home/backup/partitions-* > /home/backup/partitions
## OpenBSD
# disklabel sd0 > /home/backup/partitions
# backup MegaCli config
#megacli -CfgSave -f /home/backup/megacli_conf.dump -a0 >/dev/null
## Dump system and kernel versions
uname -a > /home/backup/uname
## Dump network routes with mtr and traceroute (warning: can take a long time behind aggressive firewalls)
for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
    mtr -r $addr > /home/backup/mtr-${addr}
    traceroute -n $addr > /home/backup/traceroute-${addr} 2>&1
done
## Dump process with ps
ps auwwx >/home/backup/ps.out
if [ "$SYSTEM" = "linux" ]; then
## Dump network connections with netstat
netstat -taupen >/home/backup/netstat.out
## List Debian packages
dpkg -l >/home/backup/packages
dpkg --get-selections >/home/backup/packages.getselections
apt-cache dumpavail >/home/backup/packages.available
else
## Dump network connections with netstat
netstat -finet -atn >/home/backup/netstat.out
## List OpenBSD packages
pkg_info -m >/home/backup/packages
fi
##### REMOTE BACKUP ###################################################
n=0
server=""
while :; do
    server=$(pick_server "${n}")
    test $? = 0 || exit 2
    if test_server "${server}"; then
        break
    else
        server=""
        n=$(( n + 1 ))
    fi
done
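# At this point "server" holds a reachable "HOST:PORT" entry from SERVERS;
# otherwise the script has already exited with code 2 above.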
SSH_SERVER=$(echo "${server}" | cut -d':' -f1)
SSH_PORT=$(echo "${server}" | cut -d':' -f2)
HOSTNAME=$(hostname)
BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")
if [ "$SYSTEM" = "linux" ]; then
rep="/bin /boot /lib /opt /sbin /usr"
else
rep="/bsd /bin /sbin /usr"
fi
# /!\ DO NOT USE COMMENTS in the rsync command /!\
# It breaks the command and destroys data; simply remove (or add) lines instead.
rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
    --exclude "lost+found" \
    --exclude ".nfs.*" \
    --exclude "/var/log" \
    --exclude "/var/log/evobackup*" \
    --exclude "/var/lib/mysql" \
    --exclude "/var/lib/postgres" \
    --exclude "/var/lib/postgresql" \
    --exclude "/var/lib/sympa" \
    --exclude "/var/lib/metche" \
    --exclude "/var/run" \
    --exclude "/var/lock" \
    --exclude "/var/state" \
    --exclude "/var/apt" \
    --exclude "/var/cache" \
    --exclude "/usr/src" \
    --exclude "/usr/doc" \
    --exclude "/usr/share/doc" \
    --exclude "/usr/obj" \
    --exclude "dev" \
    --exclude "/var/spool/postfix" \
    --exclude "/var/lib/amavis/amavisd.sock" \
    --exclude "/var/lib/munin/*tmp*" \
    --exclude "/var/lib/php5" \
    --exclude "/var/spool/squid" \
    --exclude "/var/lib/elasticsearch" \
    --exclude "/var/lib/amavis/tmp" \
    --exclude "/var/lib/clamav/*.tmp" \
    --exclude "/home/mysqltmp" \
    --exclude "/var/lib/php/sessions" \
    $rep \
    /etc \
    /root \
    /var \
    /home \
    /srv \
    -e "ssh -p $SSH_PORT" \
    "root@$SSH_SERVER:/var/backup/" \
    | tail -30 >> /var/log/evobackup.log
END=$(/bin/date +"%d-%m-%Y ; %H:%M")
##### REPORTING #######################################################
echo "EvoBackup - $HOSTNAME - START $BEGINNING" \
>> /var/log/evobackup.log
echo "EvoBackup - $HOSTNAME - STOP $END" \
>> /var/log/evobackup.log
tail -10 /var/log/evobackup.log | \
mail -s "[info] EvoBackup - Client $HOSTNAME" \
$MAIL