#!/bin/sh
#
# Script Evobackup client
# See https://gitea.evolix.org/evolix/evobackup
#
# Author: Gregory Colpart
# Contributors:
# Romain Dessort
# Benoît Série
# Tristan Pilat
# Victor Laborie
# Jérémy Lecour
#
# Licence: AGPLv3
#
# The following variables must be changed:
# SSH_PORT: The Port used for the ssh(1) jail on the backup server
# MAIL: The email address to send notifications to.
# SRV: The hostname or IP address of the backup server.
#
# You must then uncomment the various
# examples that best suit your case
#

PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin

## lang = C for english outputs
export LANGUAGE=C
export LANG=C

## Force umask
umask 077

## Verify other evobackup process and kill if needed
PIDFILE=/var/run/evobackup.pid
if [ -e "$PIDFILE" ]; then
    pid=$(cat "$PIDFILE")
    # Only kill when the recorded PID is still alive: a stale pidfile
    # (crash, reboot) could otherwise point at an unrelated process that
    # recycled the PID. kill -0 only checks existence, it sends no signal.
    if kill -0 "$pid" 2>/dev/null; then
        # Killing the childs of evobackup.
        # NOTE(review): "ps h --ppid" is GNU/Linux ps syntax; this branch is
        # not portable to the OpenBSD side of the script — confirm.
        for child in $(ps h --ppid "$pid" -o pid | tr -s '\n' ' '); do
            kill -9 "$child"
        done
        # Then kill the main PID.
        kill -9 "$pid"
        echo "$0 tourne encore (PID $pid). Processus killé" >&2
    fi
fi
echo "$$" > "$PIDFILE"
# $PIDFILE is expanded now (double quotes), on purpose: the trap must remove
# this exact file even if the variable were changed later.
trap "rm -f $PIDFILE" EXIT

# port SSH
SSH_PORT=2XXX

# email adress for notifications
MAIL=jdoe@example.com

# choose "linux" or "bsd"
SYSTEM=$(uname | tr '[:upper:]' '[:lower:]')

# Variable to choose different backup server with date
# (%e = day of month; odd/even days alternate between two backup nodes)
NODE=$(($(date +%e) % 2))

# serveur address for rsync
SRV="node$NODE.backup.example.com"

## We use /home/backup : feel free to use your own dir
mkdir -p -m 700 /home/backup

## OpenLDAP : example with slapcat
# slapcat -l /home/backup/ldap.bak

### MySQL

## example with global and compressed mysqldump
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#     --opt --all-databases --force --events --hex-blob | gzip --best > /home/backup/mysql.bak.gz

## example with two dumps for each table (.sql/.txt) for all databases
# for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#     | egrep -v "^(Database|information_schema|performance_schema|sys)" ); \
#     do mkdir -p -m 700 /home/mysqldump/$i ; chown -RL mysql /home/mysqldump ; \
#     mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
#     --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done

## example with compressed SQL dump for each databases
# mkdir -p -m 700 /home/mysqldump/
# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
#     | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
#     mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i | gzip --best > /home/mysqldump/${i}.sql.gz
# done

## example with *one* uncompressed SQL dump for *one* database (MYBASE)
# mkdir -p -m 700 /home/mysqldump/MYBASE
# chown -RL mysql /home/mysqldump/
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
#     --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE
mysqlhotcopy # mkdir -p -m 700 /home/mysqlhotcopy/ # mysqlhotcopy BASE /home/mysqlhotcopy/ ## example for multiples MySQL instances # mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3) # grep -E "^port\s*=\s*\d*" /etc/mysql/my.cnf |while read instance; do # instance=$(echo "$instance"|awk '{ print $3 }') # if [ "$instance" != "3306" ] # then # mysqldump -P $instance --opt --all-databases --hex-blob -u mysqladmin -p$mysqladminpasswd > /home/backup/mysql.$instance.bak # fi # done ### PostgreSQL ## example with pg_dumpall (warning: you need space in ~postgres) # su - postgres -c "pg_dumpall > ~/pg.dump.bak" # mv ~postgres/pg.dump.bak /home/backup/ ## another method with gzip directly piped # cd /var/lib/postgresql # sudo -u postgres pg_dumpall | gzip > /home/backup/pg.dump.bak.gz # cd - > /dev/null ## example with all tables from MYBASE excepts TABLE1 and TABLE2 # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE ## example with only TABLE1 and TABLE2 from MYBASE # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f /home/backup/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE ## MongoDB : example with mongodump ## don't forget to create use with read-only access ## > use admin ## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } ) # test -d /home/backup/mongodump/ && rm -rf /home/backup/mongodump/ # mkdir -p -m 700 /home/backup/mongodump/ # mongodump --quiet -u mongobackup -pPASS -o /home/backup/mongodump/ # if [ $? -ne 0 ]; then # echo "Error with mongodump!" # fi ## Redis : example with copy .rdb file # cp /var/lib/redis/dump.rdb /home/backup/ ## ElasticSearch, take a snapshot as a backup. ## Warning: You need to have a path.repo configured. 
## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log ## Clustered version here ## It basically the same thing except that you need to check that NFS is mounted # if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2' # then # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" -o /tmp/es_snapshot.daily.log # else # echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.' # fi ## If you need to keep older snapshot, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines by : # for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}' # done # date=$(date +%F) # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log ## RabbitMQ : export config #rabbitmqadmin export /home/backup/rabbitmq.config >> /var/log/evobackup.log ## Dump MBR / table partitions with dd and sfdisk ## Linux #for disk in $(ls /dev/[sv]d[a-z] 2>/dev/null); do # name=$(basename $disk) # dd if=$disk of=/home/backup/MBR-$name bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)" # fdisk -l $disk > /home/backup/partitions-$name #done #cat /home/backup/partitions-* > /home/backup/partitions ## OpenBSD # disklabel sd0 > /home/backup/partitions # backup MegaCli config #megacli -CfgSave -f /home/backup/megacli_conf.dump -a0 >/dev/null ## 
## Dump system and kernel versions
uname -a > /home/backup/uname

## Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls)
for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
    mtr -r "$addr" > "/home/backup/mtr-${addr}"
    traceroute -n "$addr" > "/home/backup/traceroute-${addr}" 2>&1
done

## Dump process with ps
ps auwwx >/home/backup/ps.out

if [ "$SYSTEM" = "linux" ]; then
    ## Dump network connections with netstat
    netstat -taupen >/home/backup/netstat.out

    ## List Debian packages
    dpkg -l >/home/backup/packages
    dpkg --get-selections >/home/backup/packages.getselections
    apt-cache dumpavail >/home/backup/packages.available
else
    ## Dump network connections with netstat
    netstat -finet -atn >/home/backup/netstat.out

    ## List OpenBSD packages
    pkg_info -m >/home/backup/packages
fi

HOSTNAME=$(hostname)
BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")

# Base directories to back up, per system.
if [ "$SYSTEM" = "linux" ]; then
    rep="/bin /boot /lib /opt /sbin /usr"
else
    rep="/bsd /bin /sbin /usr"
fi

# rsync output goes to a temp file first so its exit status is not lost
# in a pipeline (POSIX sh has no pipefail): a failed transfer must not be
# reported as a success.
RSYNC_OUT=$(mktemp /tmp/evobackup.rsync.XXXXXX) || RSYNC_OUT=/tmp/evobackup.rsync.$$

# $rep is intentionally unquoted: it must word-split into several source dirs.
rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
    --exclude "lost+found" \
    --exclude ".nfs.*" \
    --exclude "/var/log" \
    --exclude "/var/log/evobackup*" \
    --exclude "/var/lib/mysql" \
    --exclude "/var/lib/postgres" \
    --exclude "/var/lib/postgresql" \
    --exclude "/var/lib/sympa" \
    --exclude "/var/lib/metche" \
    --exclude "/var/run" \
    --exclude "/var/lock" \
    --exclude "/var/state" \
    --exclude "/var/apt" \
    --exclude "/var/cache" \
    --exclude "/usr/src" \
    --exclude "/usr/doc" \
    --exclude "/usr/share/doc" \
    --exclude "/usr/obj" \
    --exclude "dev" \
    --exclude "/var/spool/postfix" \
    --exclude "/var/lib/amavis/amavisd.sock" \
    --exclude "/var/lib/munin/*tmp*" \
    --exclude "/var/lib/php5" \
    --exclude "/var/spool/squid" \
    --exclude "/var/lib/elasticsearch" \
    --exclude "/var/lib/amavis/tmp" \
    --exclude "/var/lib/clamav/*.tmp" \
    --exclude "/home/mysqltmp" \
    --exclude "/var/lib/php/sessions" \
    $rep \
    /etc \
    /root \
    /var \
    /home \
    /srv \
    -e "ssh -p $SSH_PORT" \
    "root@$SRV:/var/backup/" \
    > "$RSYNC_OUT"
RSYNC_RC=$?

tail -30 "$RSYNC_OUT" >> /var/log/evobackup.log
rm -f "$RSYNC_OUT"

END=$(/bin/date +"%d-%m-%Y ; %H:%M")

echo "EvoBackup - $HOSTNAME - START $BEGINNING" \
    >> /var/log/evobackup.log

echo "EvoBackup - $HOSTNAME - STOP $END" \
    >> /var/log/evobackup.log

# Exit code 24 ("some files vanished during transfer") is expected on a live
# system and treated as success; anything else non-zero is flagged in the
# mail subject so failed backups do not go unnoticed.
if [ "$RSYNC_RC" -eq 0 ] || [ "$RSYNC_RC" -eq 24 ]; then
    STATUS="info"
else
    STATUS="ERROR rsync=$RSYNC_RC"
fi

tail -10 /var/log/evobackup.log | \
    mail -s "[$STATUS] EvoBackup - Client $HOSTNAME" \
    "$MAIL"