#!/bin/bash
#
# Script Evobackup client
# See https://forge.evolix.org/projects/evobackup
#
# Author: Gregory Colpart
# Contributor: Romain Dessort, Benoît Série, Tristan Pilat, Victor Laborie, Jérémy Lecour
# Licence: AGPLv3
#

## lang = C for english outputs
export LANGUAGE=C
export LANG=C

## Force umask (backups may contain sensitive data: keep files private)
umask 077

## Verify other evobackup process and kill if needed
PIDFILE=/var/run/evobackup.pid
if [ -e "${PIDFILE}" ]; then
    pid=$(cat "${PIDFILE}")
    # Only kill if the recorded PID is still alive: a stale pidfile left
    # behind by a crashed run would otherwise make us kill an unrelated
    # (recycled) PID and print a misleading "still running" message.
    if kill -0 "${pid}" 2>/dev/null; then
        # Kill the children of evobackup first...
        for ppid in $(ps h --ppid "${pid}" -o pid | tr -s '\n' ' '); do
            kill -9 "${ppid}"
        done
        # ...then kill the main PID.
        kill -9 "${pid}"
        echo "$0 tourne encore (PID ${pid}). Processus killé" >&2
    fi
fi
echo "$$" > "${PIDFILE}"
# Remove the pidfile on any exit path.
trap 'rm -f "${PIDFILE}"' EXIT

# port SSH
SSH_PORT=2XXX

# email address for notifications
MAIL=jdoe@example.com

# "linux" or "bsd" (lowercased kernel name reported by uname)
SYSTEM=$(uname | tr '[:upper:]' '[:lower:]')

# list of backup servers to use
NODES=( "node0.backup.example.com" "node1.backup.example.com" )
NB_NODES=${#NODES[@]}

# Select a server depending on the current day of month.
# "10#" forces base-10 so days like "08"/"09" are not parsed as octal.
I=$(( 10#$(date +%d) % NB_NODES ))
SRV=${NODES[${I}]}

## We use /home/backup : feel free to use your own dir
LOCAL_BACKUP_DIR=/home/backup
mkdir -p -m 700 "${LOCAL_BACKUP_DIR}"

## OpenLDAP : example with slapcat
# slapcat -l ${LOCAL_BACKUP_DIR}/ldap.bak

### MySQL

## example with global and compressed mysqldump
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#   --opt --all-databases --force --events --hex-blob | gzip --best > ${LOCAL_BACKUP_DIR}/mysql.bak.gz

## example with two dumps for each table (.sql/.txt) for all databases
# for i in $(echo SHOW DATABASES | mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
#   | egrep -v "^(Database|information_schema|performance_schema)" ); \
#   do mkdir -p /home/mysqldump/${i} ; chown -RL mysql /home/mysqldump ; \
#   mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments -T \
#   /home/mysqldump/${i} ${i}; done
## example with compressed SQL dump for each database
# mkdir -p /home/mysqldump/
# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
#   | egrep -v "^(Database|information_schema|performance_schema)"); do
#     mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob ${i} | gzip --best > /home/mysqldump/${i}.sql.gz
# done

## example with *one* uncompressed SQL dump for *one* database (MYBASE)
# mkdir -p -m 700 /home/mysqldump/MYBASE
# chown -RL mysql /home/mysqldump/
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -Q \
#   --opt --events --hex-blob --skip-comments -T /home/mysqldump/MYBASE MYBASE

## example with mysqlhotcopy
# mkdir -p /home/mysqlhotcopy/
# mysqlhotcopy BASE /home/mysqlhotcopy/

## example for multiples MySQL instances
# mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3)
# grep -E "^port\s*=\s*\d*" /etc/mysql/my.cnf |while read instance; do
#     instance=$(echo "${instance}"|awk '{ print $3 }')
#     if [ "${instance}" != "3306" ]
#     then
#         mysqldump -P ${instance} --opt --all-databases --hex-blob -u mysqladmin -p${mysqladminpasswd} > ${LOCAL_BACKUP_DIR}/mysql.${instance}.bak
#     fi
# done

### PostgreSQL

## example with pg_dumpall (warning: you need space in ~postgres)
# su - postgres -c "pg_dumpall > ~/pg.dump.bak"
# mv ~postgres/pg.dump.bak ${LOCAL_BACKUP_DIR}/

## another method with gzip directly piped
# cd /var/lib/postgresql
# sudo -u postgres pg_dumpall | gzip > ${LOCAL_BACKUP_DIR}/pg.dump.bak.gz
# cd - > /dev/null

## example with all tables from MYBASE excepts TABLE1 and TABLE2
## (pg_dump -T *excludes* the named tables)
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE

## example with only TABLE1 and TABLE2 from MYBASE
## (pg_dump -t *selects* the named tables)
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -t 'TABLE1' -t 'TABLE2' MYBASE

## MongoDB : example with mongodump
## don't forget to create a user with read-only access
## > use admin
## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
# mongodump --quiet -u mongobackup -pPASS -o ${LOCAL_BACKUP_DIR}/mongodump/
# if [ $? -ne 0 ]; then
#     echo "Error with mongodump!"
# fi

## Redis : example with copy .rdb file
# cp /var/lib/redis/dump.rdb ${LOCAL_BACKUP_DIR}/

## ElasticSearch : example with rsync (warning: don't forget to use NFS if you have a cluster)
## Disable ES translog flush
# curl -s -XPUT 'localhost:9200/_settings' -d '{"index.translog.disable_flush": true}' >/dev/null
## Flushes translog
# curl -s 'localhost:9200/_flush' | grep -qe '"ok":true'
## If it succeed, do an rsync of the datadir
# if [ $? -eq 0 ]; then
#     rsync -a /var/lib/elasticsearch ${LOCAL_BACKUP_DIR}/
# else
#     echo "Error when flushing ES translog indexes."
# fi
## In any case re-enable translog flush
# curl -s -XPUT 'localhost:9200/_settings' -d '{"index.translog.disable_flush": false}' > /dev/null

## RabbitMQ : export config
#rabbitmqadmin export ${LOCAL_BACKUP_DIR}/rabbitmq.config >> /var/log/evobackup.log

## Dump MBR / table partitions with dd and sfdisk
## Linux
#for disk in $(ls /dev/[sv]d[a-z] 2>/dev/null); do
#    name=$(basename ${disk})
#    dd if=${disk} of=${LOCAL_BACKUP_DIR}/MBR-${name} bs=512 count=1 2>&1 | egrep -v "(records in|records out|512 bytes)"
#    fdisk -l ${disk} > ${LOCAL_BACKUP_DIR}/partitions-${name}
#done
#cat ${LOCAL_BACKUP_DIR}/partitions-* > ${LOCAL_BACKUP_DIR}/partitions
## OpenBSD
# disklabel sd0 > ${LOCAL_BACKUP_DIR}/partitions

# backup MegaCli config
#megacli -CfgSave -f ${LOCAL_BACKUP_DIR}/megacli_conf.dump -a0 >/dev/null

## Dump system and kernel versions
uname -a > "${LOCAL_BACKUP_DIR}/uname"

## Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls)
for addr in 8.8.8.8 www.evolix.fr travaux.evolix.net; do
    mtr -r "${addr}" > "${LOCAL_BACKUP_DIR}/mtr-${addr}"
    traceroute -n "${addr}" > "${LOCAL_BACKUP_DIR}/traceroute-${addr}"
done
## Dump process with ps
ps aux > "${LOCAL_BACKUP_DIR}/ps.out"

if [ "${SYSTEM}" = "linux" ]; then
    ## Dump network connections with netstat
    netstat -taupen > "${LOCAL_BACKUP_DIR}/netstat.out"
    ## List Debian packages
    dpkg -l > "${LOCAL_BACKUP_DIR}/packages"
    dpkg --get-selections > "${LOCAL_BACKUP_DIR}/packages.getselections"
else
    ## Dump network connections with netstat
    netstat -finet -atn > "${LOCAL_BACKUP_DIR}/netstat.out"
    ## List OpenBSD packages
    pkg_info -m > "${LOCAL_BACKUP_DIR}/packages"
fi

HOSTNAME=$(hostname)
BEGINNING=$(/bin/date +"%d-%m-%Y ; %H:%M")

# System directories to push, depending on the OS.
if [ "${SYSTEM}" = "linux" ]; then
    rep="/bin /boot /lib /opt /sbin /usr"
else
    rep="/bsd /bin /sbin /usr"
fi

# NOTE: ${rep} is intentionally left unquoted so it word-splits into one
# argument per directory (none of these paths contain whitespace).
rsync -avzh --stats --delete --delete-excluded --force --ignore-errors --partial \
    --exclude "lost+found" \
    --exclude ".nfs.*" \
    --exclude "/var/log" \
    --exclude "/var/log/evobackup*" \
    --exclude "/var/lib/mysql" \
    --exclude "/var/lib/postgres" \
    --exclude "/var/lib/postgresql" \
    --exclude "/var/lib/sympa" \
    --exclude "/var/lib/metche" \
    --exclude "/var/run" \
    --exclude "/var/lock" \
    --exclude "/var/state" \
    --exclude "/var/apt" \
    --exclude "/var/cache" \
    --exclude "/usr/src" \
    --exclude "/usr/doc" \
    --exclude "/usr/share/doc" \
    --exclude "/usr/obj" \
    --exclude "dev" \
    --exclude "/var/spool/postfix" \
    --exclude "/var/lib/amavis/amavisd.sock" \
    --exclude "/var/lib/munin/*tmp*" \
    --exclude "/var/lib/php5" \
    --exclude "/var/spool/squid" \
    --exclude "/var/lib/elasticsearch" \
    --exclude "/var/lib/amavis/tmp" \
    --exclude "/var/lib/clamav/*.tmp" \
    --exclude "/home/mysqltmp" \
    --exclude "/var/lib/php/sessions" \
    ${rep} \
    /etc \
    /root \
    /var \
    /home \
    /srv \
    -e "ssh -p ${SSH_PORT}" \
    "root@${SRV}:/var/backup/" \
    | tail -30 >> /var/log/evobackup.log
# rsync's exit status is masked by the pipe to tail: recover it from
# PIPESTATUS and log an explicit error line so failed backups show up
# in the mailed report instead of looking like successes.
rsync_rc=${PIPESTATUS[0]}
if [ "${rsync_rc}" -ne 0 ]; then
    echo "EvoBackup - ${HOSTNAME} - ERROR rsync exited with code ${rsync_rc}" \
        >> /var/log/evobackup.log
fi

END=$(/bin/date +"%d-%m-%Y ; %H:%M")

echo "EvoBackup - ${HOSTNAME} - START ${BEGINNING}" \
    >> /var/log/evobackup.log
echo "EvoBackup - ${HOSTNAME} - STOP ${END}" \
    >> /var/log/evobackup.log

# Mail the tail of the log (START/STOP lines and any rsync error) to the admin.
tail -10 /var/log/evobackup.log | \
    mail -s "[info] EvoBackup - Client ${HOSTNAME}" \
    "${MAIL}"