Jérémy Lecour
b5f1e13685
SERVERS contains 1 or more servers to send backup files to. Each day a primary backup server is chosen. If it's not available the script falls back to the next server, and the next…
#!/bin/sh
#
# Script Evobackup client
# See https://gitea.evolix.org/evolix/evobackup
#
# Author: Gregory Colpart <reg@evolix.fr>
# Contributors:
# Romain Dessort <rdessort@evolix.fr>
# Benoît Série <bserie@evolix.fr>
# Tristan Pilat <tpilat@evolix.fr>
# Victor Laborie <vlaborie@evolix.fr>
# Jérémy Lecour <jlecour@evolix.fr>
#
# Licence: AGPLv3
#
# The following variables must be changed:
# MAIL: The email address to send notifications to.
# SERVERS: The list of hosts (hostname or IP address) and SSH port
#          to send backup files to.
#
# You must then uncomment the various examples that best suit your case.

PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin

## lang = C for English outputs
export LANGUAGE=C
export LANG=C

## Force umask
umask 077

## Check for another running evobackup process and kill it if needed
PIDFILE=/var/run/evobackup.pid
if [ -e $PIDFILE ]; then
    pid=$(cat "$PIDFILE")
    # Kill the children of evobackup.
    for ppid in $(ps h --ppid "$pid" -o pid | tr -s '\n' ' '); do
        kill -9 "$ppid";
    done
    # Then kill the main PID.
    kill -9 "$pid"
    echo "$0 is still running (PID $pid). Process killed" >&2
fi
echo "$$" > $PIDFILE
trap "rm -f $PIDFILE" EXIT

# email address for notifications
MAIL=jdoe@example.com

# choose "linux" or "bsd"
SYSTEM=$(uname | tr '[:upper:]' '[:lower:]')

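## SERVERS is a space-separated list of "host:port" entries. A primary server
## is chosen each day; if it is not reachable, the script falls back to the
## next one in the list (see pick_server and the selection loop below).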
SERVERS="node0.backup.example.com:2XXX node1.backup.example.com:2XXX"
SSH_CONNECT_TIMEOUT=10

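## test_server: return 0 if the given "host:port" accepts SSH connections
## within SSH_CONNECT_TIMEOUT seconds, 1 otherwise.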
test_server() {
    item=$1
    host=$(echo ${item} | cut -d':' -f1)
    port=$(echo ${item} | cut -d':' -f2)

    # Test if the server is accepting connections
    ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" ${host} -p ${port} -t "exit"

    if [ $? = 0 ]; then
        return 0
    else
        echo "Failed to connect to \`${item}' within ${SSH_CONNECT_TIMEOUT} seconds" >&2
        return 1
    fi
}
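## pick_server: print the SERVERS entry to use. The day of the month plus a
## per-host salt rotates clients across the list; the optional argument is an
## offset used to fall back to the next server when one is unreachable.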
pick_server() {
    inc=${1:-0}
    list_length=$(echo "${SERVERS}" | wc -w)

    if [ "${inc}" -ge "${list_length}" ]; then
        echo "No more server available" >&2
        return 1
    fi

    salt=$(hostname | cksum | cut -d' ' -f1)
    item=$(echo $(( ($(date +%d) + salt + inc) % list_length )))
    field=$(( item + 1 ))

    echo "${SERVERS}" | cut -d' ' -f${field}
}

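## Try today's server first, then fall back to the next ones until one
## accepts the SSH connection.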
n=0
SERVER=""
while :; do
    server=$(pick_server "${n}")
    test $? = 0 || exit 2

    if test_server "${server}"; then
        SERVER="${server}"
        break
    else
        n=$(( n + 1 ))
    fi
done

SSH_SERVER=$(echo $SERVER | cut -d':' -f1)
SSH_PORT=$(echo $SERVER | cut -d':' -f2)

## We use /home/backup: feel free to use your own dir
mkdir -p -m 700 /home/backup

## OpenLDAP: example with slapcat
# slapcat -l /home/backup/ldap.bak

### MySQL

## example with a global and compressed mysqldump
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#   --opt --all-databases --force --events --hex-blob | gzip --best > /home/backup/mysql.bak.gz

## example with two dumps for each table (.sql/.txt) for all databases
# for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#   | egrep -v "^(Database|information_schema|performance_schema|sys)" ); \
#   do mkdir -p -m 700 /home/mysqldump/$i ; chown -RL mysql /home/mysqldump ; \
#   mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
#   --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done

## example with a compressed SQL dump for each database
# mkdir -p -m 700 /home/mysqldump/
# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
#   | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
#   mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i | gzip --best > /home/mysqldump/${i}.sql.gz
# done

## example with *one* uncompressed SQL dump for *one* database (MYBASE)
# mkdir -p -m 700 /home/mysqldump/MYBASE
# chown -RL mysql /home/mysqldump/
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
#   --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE

## example with mysqlhotcopy
# mkdir -p -m 700 /home/mysqlhotcopy/
# mysqlhotcopy BASE /home/mysqlhotcopy/

## example for multiple MySQL instances
# mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3)
# grep -E "^port\s*=\s*\d*" /etc/mysql/my.cnf |while read instance; do
#   instance=$(echo "$instance"|awk '{ print $3 }')
#   if [ "$instance" != "3306" ]
#   then
#     mysqldump -P $instance --opt --all-databases --hex-blob -u mysqladmin -p$mysqladminpasswd > /home/backup/mysql.$instance.bak
#   fi
# done

### PostgreSQL

## example with pg_dumpall (warning: you need enough space in ~postgres)
# su - postgres -c "pg_dumpall > ~/pg.dump.bak"
# mv ~postgres/pg.dump.bak /home/backup/
## another method, piped directly into gzip
# cd /var/lib/postgresql
# sudo -u postgres pg_dumpall | gzip > /home/backup/pg.dump.bak.gz
# cd - > /dev/null

## example with all tables from MYBASE except TABLE1 and TABLE2
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE

## example with only TABLE1 and TABLE2 from MYBASE
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE

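## possible variant: a dump of MYBASE in pg_dump's custom format, which is
## compressed and restorable with pg_restore (MYBASE, USER and the connection
## settings are placeholders to adapt)
# pg_dump -p 5432 -h 127.0.0.1 -U USER -F c -f /home/backup/MYBASE.dump MYBASE
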
## MongoDB: example with mongodump
## don't forget to create a user with read-only access
## > use admin
## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
# test -d /home/backup/mongodump/ && rm -rf /home/backup/mongodump/
# mkdir -p -m 700 /home/backup/mongodump/
# mongodump --quiet -u mongobackup -pPASS -o /home/backup/mongodump/
# if [ $? -ne 0 ]; then
#   echo "Error with mongodump!"
# fi

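## possible variant, assuming MongoDB >= 3.2: --gzip compresses the dumped
## collections on the fly
# mongodump --quiet --gzip -u mongobackup -pPASS -o /home/backup/mongodump/
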
## Redis: example copying the .rdb file
# cp /var/lib/redis/dump.rdb /home/backup/

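## possible variant, assuming a local Redis instance on the default port:
## ask the server for a fresh RDB dump with redis-cli
# redis-cli --rdb /home/backup/dump.rdb
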
## ElasticSearch: take a snapshot as a backup
## Warning: you need to have a path.repo configured
## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes
# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
## Clustered version
## It is basically the same thing, except that you need to check that NFS is mounted on every node
# if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2'
# then
#   curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
#   curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log
# else
#   echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.'
# fi
## If you need to keep older snapshots, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines with:
# for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do
#   curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}'
# done
# date=$(date +%F)
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log

## RabbitMQ: export the configuration
# rabbitmqadmin export /home/backup/rabbitmq.config >> /var/log/evobackup.log

## Dump MBR / partition tables with dd and fdisk
## Linux
# for disk in $(ls /dev/[sv]d[a-z] 2>/dev/null); do
#   name=$(basename $disk)
#   dd if=$disk of=/home/backup/MBR-$name bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)"
#   fdisk -l $disk > /home/backup/partitions-$name
# done
# cat /home/backup/partitions-* > /home/backup/partitions
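## possible addition: sfdisk can dump the partition table in a format that is
## reusable for a restore with "sfdisk <disk> < file" (/dev/sda is a placeholder)
# sfdisk -d /dev/sda > /home/backup/partitions-sda.sfdisk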
## OpenBSD
# disklabel sd0 > /home/backup/partitions

## Backup MegaCli config
# megacli -CfgSave -f /home/backup/megacli_conf.dump -a0 >/dev/null

## Dump system and kernel versions
uname -a > /home/backup/uname

## Dump network routes with mtr and traceroute (warning: this can be slow behind aggressive firewalls)
for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
    mtr -r $addr > /home/backup/mtr-${addr}
    traceroute -n $addr > /home/backup/traceroute-${addr} 2>&1
done

## Dump processes with ps
ps auwwx >/home/backup/ps.out

if [ "$SYSTEM" = "linux" ]; then
|
||
## Dump network connections with netstat
|
||
netstat -taupen >/home/backup/netstat.out
|
||
|
||
## List Debian packages
|
||
dpkg -l >/home/backup/packages
|
||
dpkg --get-selections >/home/backup/packages.getselections
|
||
apt-cache dumpavail >/home/backup/packages.available
|
||
else
|
||
## Dump network connections with netstat
|
||
netstat -finet -atn >/home/backup/netstat.out
|
||
|
||
## List OpenBSD packages
|
||
pkg_info -m >/home/backup/packages
|
||
fi
|
||
|
||
HOSTNAME=$(hostname)

BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")

if [ "$SYSTEM" = "linux" ]; then
    rep="/bin /boot /lib /opt /sbin /usr"
else
    rep="/bsd /bin /sbin /usr"
fi

rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
    --exclude "lost+found" \
    --exclude ".nfs.*" \
    --exclude "/var/log" \
    --exclude "/var/log/evobackup*" \
    --exclude "/var/lib/mysql" \
    --exclude "/var/lib/postgres" \
    --exclude "/var/lib/postgresql" \
    --exclude "/var/lib/sympa" \
    --exclude "/var/lib/metche" \
    --exclude "/var/run" \
    --exclude "/var/lock" \
    --exclude "/var/state" \
    --exclude "/var/apt" \
    --exclude "/var/cache" \
    --exclude "/usr/src" \
    --exclude "/usr/doc" \
    --exclude "/usr/share/doc" \
    --exclude "/usr/obj" \
    --exclude "dev" \
    --exclude "/var/spool/postfix" \
    --exclude "/var/lib/amavis/amavisd.sock" \
    --exclude "/var/lib/munin/*tmp*" \
    --exclude "/var/lib/php5" \
    --exclude "/var/spool/squid" \
    --exclude "/var/lib/elasticsearch" \
    --exclude "/var/lib/amavis/tmp" \
    --exclude "/var/lib/clamav/*.tmp" \
    --exclude "/home/mysqltmp" \
    --exclude "/var/lib/php/sessions" \
    $rep \
    /etc \
    /root \
    /var \
    /home \
    /srv \
    -e "ssh -p ${SSH_PORT}" \
    "root@${SSH_SERVER}:/var/backup/" \
    | tail -30 >> /var/log/evobackup.log
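## Note: because the rsync output is piped to tail, a non-zero rsync exit code
## is not checked here; transfer errors only show up in /var/log/evobackup.log.
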
END=$(/bin/date +"%d-%m-%Y ; %H:%M")

echo "EvoBackup - $HOSTNAME - START $BEGINNING" \
    >> /var/log/evobackup.log

echo "EvoBackup - $HOSTNAME - STOP $END" \
    >> /var/log/evobackup.log

tail -10 /var/log/evobackup.log | \
    mail -s "[info] EvoBackup - Client $HOSTNAME" \
    $MAIL