#!/bin/bash # # Script Evobackup client # See https://gitea.evolix.org/evolix/evobackup # # Authors: Evolix , # Gregory Colpart , # Romain Dessort , # Benoit Série , # Tristan Pilat , # Victor Laborie , # Jérémy Lecour # and others. # # Licence: AGPLv3 # # /!\ DON'T FORGET TO SET "MAIL" and "SERVERS" VARIABLES ##### Configuration ################################################### VERSION="22.12" # email adress for notifications MAIL=jdoe@example.com # list of hosts (hostname or IP) and SSH port for Rsync SERVERS="node0.backup.example.com:2XXX node1.backup.example.com:2XXX" # explicit PATH PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin # Should we fallback on other servers when the first one is unreachable? SERVERS_FALLBACK=${SERVERS_FALLBACK:-1} # timeout (in seconds) for SSH connections SSH_CONNECT_TIMEOUT=${SSH_CONNECT_TIMEOUT:-90} # We use /home/backup : feel free to use your own dir LOCAL_BACKUP_DIR="/home/backup" # You can set "linux" or "bsd" manually or let it choose automatically SYSTEM=$(uname | tr '[:upper:]' '[:lower:]') # Store pid in a file named after this program's name PROGNAME=$(basename "$0") PIDFILE="/var/run/${PROGNAME}.pid" # Customize the log path if you have multiple scripts and with separate logs LOGFILE="/var/log/evobackup.log" # Full Rsync log file, reset each time RSYNC_LOGFILE="/var/log/${PROGNAME}.rsync.log" HOSTNAME=$(hostname) DATE_FORMAT="%Y-%m-%d %H:%M:%S" # Enable/disable local tasks (default: enabled) : "${LOCAL_TASKS:=1}" # Enable/disable sync tasks (default: enabled) : "${SYNC_TASKS:=1}" CANARY_FILE="/zzz_evobackup_canary" # Source paths can be customized # Empty lines, and lines containing # or ; are ignored RSYNC_INCLUDES=" /etc /root /var /home " # Excluded paths can be customized # Empty lines, and lines beginning with # or ; are ignored RSYNC_EXCLUDES=" /dev /proc /run /sys /tmp /usr/doc /usr/obj /usr/share/doc /usr/src /var/apt /var/cache /var/db/munin/*.tmp /var/lib/amavis/amavisd.sock /var/lib/amavis/tmp /var/lib/clamav/*.tmp /var/lib/elasticsearch /var/lib/metche /var/lib/mongodb /var/lib/munin/*tmp* /var/lib/mysql /var/lib/php/sessions /var/lib/php5 /var/lib/postgres /var/lib/postgresql /var/lib/sympa /var/lock /var/run /var/spool/postfix /var/spool/smtpd /var/spool/squid /var/state /var/tmp lost+found .nfs.* lxc/*/rootfs/tmp lxc/*/rootfs/usr/doc lxc/*/rootfs/usr/obj lxc/*/rootfs/usr/share/doc lxc/*/rootfs/usr/src lxc/*/rootfs/var/apt lxc/*/rootfs/var/cache lxc/*/rootfs/var/lib/php5 lxc/*/rootfs/var/lib/php/sessions lxc/*/rootfs/var/lock lxc/*/rootfs/var/run lxc/*/rootfs/var/state lxc/*/rootfs/var/tmp /home/mysqltmp " ##### FUNCTIONS ####################################################### mysql_list_databases() { port=${1:-"3306"} mysql --defaults-extra-file=/etc/mysql/debian.cnf -P ${port} -e 'show databases' -s --skip-column-names | grep --extended-regexp --invert-match "^(Database|information_schema|performance_schema|sys)" } # shellcheck disable=SC2317 dump_ldap() { ## OpenLDAP : example with slapcat dump_dir="${LOCAL_BACKUP_DIR}/ldap/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" slapcat -n 0 -l "${dump_dir}/config.bak" slapcat -n 1 -l "${dump_dir}/data.bak" slapcat -l "${dump_dir}/ldap.bak" } # shellcheck disable=SC2317 dump_mysql_global() { dump_dir="${LOCAL_BACKUP_DIR}/mysql-global/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 --opt --all-databases --force --events --hex-blob 2> "${dump_dir}/mysql.bak.err" | gzip --best > "${dump_dir}/mysql.bak.gz" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqldump (global compressed) returned an error ${last_rc}, check ${dump_dir}/mysql.bak.err" rc=101 else rm -f "${dump_dir}/mysql.bak.err" fi } # shellcheck disable=SC2317 dump_mysql_per_base() { dump_dir="${LOCAL_BACKUP_DIR}/mysql-per-base/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" for i in $(mysql_list_databases 3306); do mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob $i 2> "${dump_dir}/${i}.err" | gzip --best > "${dump_dir}/${i}.sql.gz" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqldump (${i} compressed) returned an error ${last_rc}, check ${dump_dir}/${i}.err" rc=102 else rm -f "${dump_dir}/${i}.err" fi done } # shellcheck disable=SC2317 dump_mysql_meta() { dump_dir="${LOCAL_BACKUP_DIR}/mysql-meta/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" ## Dump all grants (requires 'percona-toolkit' package) pt-show-grants --flush --no-header 2> "${dump_dir}/all_grants.err" > "${dump_dir}/all_grants.sql" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "pt-show-grants returned an error ${last_rc}, check ${dump_dir}/all_grants.err" rc=103 else rm -f "${dump_dir}/all_grants.err" fi ## Dump all variables mysql -A -e"SHOW GLOBAL VARIABLES;" 2> "${dump_dir}/variables.err" > "${dump_dir}/variables.txt" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysql (variables) returned an error ${last_rc}, check ${dump_dir}/variables.err" rc=104 else rm -f "${dump_dir}/variables.err" fi ## Schema only (no data) for each databases for i in $(mysql_list_databases 3306); do mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --no-data --databases $i 2> "${dump_dir}/${i}.schema.err" > "${dump_dir}/${i}.schema.sql" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqldump (${i} schema) returned an error ${last_rc}, check ${dump_dir}/${i}.schema.err" rc=105 else rm -f "${dump_dir}/${i}.schema.err" fi done } # shellcheck disable=SC2317 dump_mysql_tabs() { for i in $(mysql_list_databases 3306); do dump_dir="${LOCAL_BACKUP_DIR}/mysql-tabs/$i" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" chown -RL mysql "${dump_dir}" mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments --fields-enclosed-by='\"' --fields-terminated-by=',' -T "${dump_dir}" $i 2> "${dump_dir}.err" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqldump (${i} files) returned an error ${last_rc}, check ${dump_dir}.err" rc=107 else rm -f "${dump_dir}.err" fi done } # shellcheck disable=SC2317 dump_mysql_hotcopy() { dump_dir="${LOCAL_BACKUP_DIR}/mysql-hotcopy/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" mysqlhotcopy MYBASE "${dump_dir}/" 2> "${dump_dir}/MYBASE.err" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqlhotcopy returned an error ${last_rc}, check ${dump_dir}/MYBASE.err" rc=108 else rm -f "${dump_dir}/MYBASE.err" fi } # shellcheck disable=SC2317 dump_mysql_instances() { dump_dir="${LOCAL_BACKUP_DIR}/mysql-instances/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" mysqladminpasswd=$(grep -m1 'password = .*' /root/.my.cnf|cut -d" " -f3) grep --extended-regexp "^port\s*=\s*\d*" /etc/mysql/my.cnf | while read instance; do instance=$(echo "$instance"|awk '{ print $3 }') if [ "$instance" != "3306" ]; then mysqldump -P ${instance} --opt --all-databases --hex-blob -u mysqladmin -p${mysqladminpasswd} 2> "${dump_dir}/${instance}.err" | gzip --best > "${dump_dir}/${instance}.bak.gz" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "mysqldump (instance ${instance}) returned an error ${last_rc}, check ${dump_dir}/${instance}.err" rc=107 else rm -f "${dump_dir}/${instance}.err" fi fi done } # shellcheck disable=SC2317 dump_postgresql_global() { dump_dir="${LOCAL_BACKUP_DIR}/postgresql-global/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" ## example with pg_dumpall # WARNING: you need space in ~postgres su - postgres -c "pg_dumpall > ~/pg.dump.bak" mv ~postgres/pg.dump.bak "${dump_dir}/" ## another method with gzip directly piped # ( # cd /var/lib/postgresql; # sudo -u postgres pg_dumpall | gzip > ${dump_dir}/pg.dump.bak.gz # ) } # shellcheck disable=SC2317 dump_postgresql_per_base() { dump_dir="${LOCAL_BACKUP_DIR}/postgresql-per-base/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" ( cd /var/lib/postgresql databases=$(sudo -u postgres psql -U postgres -lt | awk -F\| '{print $1}' | grep -v template*) for database in ${databases} ; do sudo -u postgres /usr/bin/pg_dump --create -s -U postgres -d ${database} | gzip --best -c > ${dump_dir}/$databases.sql.gz done ) } # shellcheck disable=SC2317 dump_postgresql_filtered() { dump_dir="${LOCAL_BACKUP_DIR}/postgresql-filtered/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" ## example with all tables from MYBASE excepts TABLE1 and TABLE2 # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f "${dump_dir}/pg-backup.tar" -t 'TABLE1' -t 'TABLE2' MYBASE ## example with only TABLE1 and TABLE2 from MYBASE # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f "${dump_dir}/pg-backup.tar" -T 'TABLE1' -T 'TABLE2' MYBASE } # shellcheck disable=SC2317 dump_redis() { for instance in $(find /var/lib/ -mindepth 1 -maxdepth 1 -type d -name 'redis*'); do name=$(basename "${instance}") dump_dir="${LOCAL_BACKUP_DIR}/${name}/" rm -rf "${dump_dir}" if [ -f "${instance}/dump.rdb" ]; then mkdir -p -m 700 "${dump_dir}" cp -a "${instance}/dump.rdb" "${dump_dir}/" fi done } # shellcheck disable=SC2317 dump_mongodb() { ## don't forget to create use with read-only access ## > use admin ## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } ) dump_dir="${LOCAL_BACKUP_DIR}/mongodump/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" mongo_user="" mongo_password="" mongodump --quiet -u ${mongo_user} -p${mongo_password} -o "${dump_dir}/" if [ $? -ne 0 ]; then echo "Error with mongodump!" fi } # shellcheck disable=SC2317 dump_megacli_config() { megacli -CfgSave -f "${LOCAL_BACKUP_DIR}/megacli_conf.dump" -a0 >/dev/null } # shellcheck disable=SC2317 dump_traceroute() { dump_dir="${LOCAL_BACKUP_DIR}/traceroute/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" network_targets="8.8.8.8 www.evolix.fr travaux.evolix.net" mtr_bin=$(command -v mtr) if [ -n "${network_targets}" ] && [ -n "${mtr_bin}" ]; then for addr in ${network_targets}; do ${mtr_bin} -r "${addr}" > "${dump_dir}/mtr-${addr}" done fi traceroute_bin=$(command -v traceroute) if [ -n "${network_targets}" ] && [ -n "${traceroute_bin}" ]; then for addr in ${network_targets}; do ${traceroute_bin} -n "${addr}" > "${dump_dir}/traceroute-${addr}" 2>&1 done fi } # shellcheck disable=SC2317 dump_server_state() { dump_dir="${LOCAL_BACKUP_DIR}/server-state" rm -rf "${dump_dir}" # Do not create the directory # mkdir -p -m 700 "${dump_dir}" dump_server_state_bin=$(command -v dump-server-state) if [ -z "${dump_server_state_bin}" ]; then error "dump-server-state is missing" rc=1 else if [ "${SYSTEM}" = "linux" ]; then ${dump_server_state_bin} --all --dump-dir "${dump_dir}" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "dump-server-state returned an error ${last_rc}, check ${dump_dir}" rc=1 fi else ${dump_server_state_bin} --all --dump-dir "${dump_dir}" last_rc=$? if [ ${last_rc} -ne 0 ]; then error "dump-server-state returned an error ${last_rc}, check ${dump_dir}" rc=1 fi fi fi } # shellcheck disable=SC2317 dump_rabbitmq() { dump_dir="${LOCAL_BACKUP_DIR}/rabbitmq/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" rabbitmqadmin export "${dump_dir}/config" >> "${LOGFILE}" } # shellcheck disable=SC2317 dump_facl() { dump_dir="${LOCAL_BACKUP_DIR}/facl/" rm -rf "${dump_dir}" mkdir -p -m 700 "${dump_dir}" getfacl -R /etc > "${dump_dir}/etc.txt" getfacl -R /home > "${dump_dir}/home.txt" getfacl -R /usr > "${dump_dir}/usr.txt" getfacl -R /var > "${dump_dir}/var.txt" } # shellcheck disable=SC2317 dump_elasticsearch_snapshot() { ## Take a snapshot as a backup. ## Warning: You need to have a path.repo configured. ## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" >> "${LOGFILE}" curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" >> "${LOGFILE}" # Clustered version here # It basically the same thing except that you need to check that NFS is mounted # if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2' # then # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" >> "${LOGFILE}" # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" >> "${LOGFILE}" # else # echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.' # fi ## If you need to keep older snapshot, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines by : # for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do # curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}' # done # date=$(/bin/date +%F) # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" >> "${LOGFILE}" } local_tasks() { log "START LOCAL_TASKS" # Remove previous error files find "${LOCAL_BACKUP_DIR}/" -type f -name '*.err' -delete # You can comment or uncomment sections below to customize the backup ## OpenLDAP # dump_ldap ## MySQL ### example with global and compressed mysqldump # dump_mysql_global ### example with compressed SQL dump (with data) for each databases # dump_mysql_per_base ### meta-data (grants, variables, schema…) # dump_mysql_meta ### example with two dumps for each table (.sql/.txt) for all databases # dump_mysql_tabs ### example with mysqlhotcopy # dump_mysql_hotcopy ### example for multiples MySQL instances # dump_mysql_instances ## PostgreSQL ### example with global dump # dump_postgresql_global ### example with filtered tables ("only" or "except") # dump_postgresql_filtered ### example with compressed PostgreSQL dump for each databases # dump_postgresql_per_base ## MongoDB # dump_mongodb ## Redis # dump_redis ## ElasticSearch # dump_elasticsearch_snapshot ## RabbitMQ config # dump_rabbitmq ## MegaCli config # dump_megacli_config ## Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls) dump_traceroute ## Dump various information about server state dump_server_state ## Dump file access control lists # dump_facl log "STOP LOCAL_TASKS" } build_rsync_main_cmd() { ################################################################### # /!\ WARNING /!\ WARNING /!\ WARNING /!\ WARNING /!\ WARNING /!\ # ################################################################### # DO NOT USE COMMENTS in rsync lines # # DO NOT ADD WHITESPACES AFTER \ in rsync lines # # It breaks the command and destroys data # # You should not modify this, unless you are really REALLY sure # ################################################################### # Create a temp file for excludes and includes includes_file="$(mktemp "${PROGNAME}.includes.XXXXXX")" excludes_file="$(mktemp "${PROGNAME}.excludes.XXXXXX")" # … and add them to the list of files to delete at exit temp_files="${temp_files} ${includes_file} ${excludes_file}" # Store includes/excludes in files # without blank lines of comments (# or ;) echo "${RSYNC_INCLUDES}" | sed -e 's/\s*\(#\|;\).*//; /^\s*$/d' > "${includes_file}" echo "${RSYNC_EXCLUDES}" | sed -e 's/\s*\(#\|;\).*//; /^\s*$/d' > "${excludes_file}" # Rsync command cmd="$(command -v rsync)" # Rsync main options cmd="${cmd} --archive" cmd="${cmd} --itemize-changes" cmd="${cmd} --quiet" cmd="${cmd} --stats" cmd="${cmd} --human-readable" cmd="${cmd} --relative" cmd="${cmd} --partial" cmd="${cmd} --delete" cmd="${cmd} --delete-excluded" cmd="${cmd} --force" cmd="${cmd} --ignore-errors" cmd="${cmd} --log-file=${RSYNC_LOGFILE}" cmd="${cmd} --rsh='ssh -p ${SSH_PORT} -o \"ConnectTimeout ${SSH_CONNECT_TIMEOUT}\"'" # Rsync excludes while read line ; do cmd="${cmd} --exclude ${line}" done < "${excludes_file}" # Rsync local sources cmd="${cmd} ${default_includes}" while read line ; do cmd="${cmd} ${line}" done < "${includes_file}" # Rsync remote destination cmd="${cmd} root@${SSH_SERVER}:/var/backup/" # output final command echo "${cmd}" } build_rsync_canary_cmd() { # Rsync command cmd="$(command -v rsync)" # Rsync options cmd="${cmd} --rsh='ssh -p ${SSH_PORT} -o \"ConnectTimeout ${SSH_CONNECT_TIMEOUT}\"'" # Rsync local source cmd="${cmd} ${CANARY_FILE}" # Rsync remote destination cmd="${cmd} root@${SSH_SERVER}:/var/backup/" # output final command echo "${cmd}" } sync_tasks() { n=0 server="" if [ "${SERVERS_FALLBACK}" = "1" ]; then # We try to find a suitable server while :; do server=$(pick_server "${n}") test $? = 0 || exit 2 if test_server "${server}"; then break else server="" n=$(( n + 1 )) fi done else # we force the server server=$(pick_server "${n}") fi SSH_SERVER=$(echo "${server}" | cut -d':' -f1) SSH_PORT=$(echo "${server}" | cut -d':' -f2) log "START SYNC_TASKS - server=${server}" # default paths, depending on system if [ "${SYSTEM}" = "linux" ]; then default_includes="/bin /boot /lib /opt /sbin /usr" else default_includes="/bsd /bin /sbin /usr" fi # reset Rsync log file if [ -n "$(command -v truncate)" ]; then truncate -s 0 "${RSYNC_LOGFILE}" else printf "" > "${RSYNC_LOGFILE}" fi # Build the final Rsync command rsync_main_cmd=$(build_rsync_main_cmd) # … log it log "SYNC_TASKS - Rsync main command : ${rsync_main_cmd}" # … execute it eval "${rsync_main_cmd}" rsync_main_rc=$? # Copy last lines of rsync log to the main log tail -n 30 "${RSYNC_LOGFILE}" >> "${LOGFILE}" if [ ${rsync_main_rc} -ne 0 ]; then error "rsync returned an error ${rsync_main_rc}, check ${LOGFILE}" rc=201 else # Build the canary Rsync command rsync_canary_cmd=$(build_rsync_canary_cmd) # … log it log "SYNC_TASKS - Rsync canary command : ${rsync_canary_cmd}" # … execute it eval "${rsync_canary_cmd}" fi log "STOP SYNC_TASKS - server=${server}" } # Call test_server with "HOST:PORT" string # It will return with 0 if the server is reachable. # It will return with 1 and a message on stderr if not. test_server() { item=$1 # split HOST and PORT from the input string host=$(echo "${item}" | cut -d':' -f1) port=$(echo "${item}" | cut -d':' -f2) # Test if the server is accepting connections ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" "${host}" -p "${port}" -t "exit" # shellcheck disable=SC2181 if [ $? = 0 ]; then # SSH connection is OK return 0 else # SSH connection failed new_error=$(printf "Failed to connect to \`%s' within %s seconds" "${item}" "${SSH_CONNECT_TIMEOUT}") log "${new_error}" SERVERS_SSH_ERRORS=$(printf "%s\\n%s" "${SERVERS_SSH_ERRORS}" "${new_error}" | sed -e '/^$/d') return 1 fi } # Call pick_server with an optional positive integer to get the nth server in the list. pick_server() { increment=${1:-0} list_length=$(echo "${SERVERS}" | wc -w) if [ "${increment}" -ge "${list_length}" ]; then # We've reached the end of the list new_error="No more server available" log "${new_error}" SERVERS_SSH_ERRORS=$(printf "%s\\n%s" "${SERVERS_SSH_ERRORS}" "${new_error}" | sed -e '/^$/d') # Log errors to stderr printf "%s\\n" "${SERVERS_SSH_ERRORS}" >&2 return 1 fi # Extract the day of month, without leading 0 (which would give an octal based number) today=$(/bin/date +%e) # A salt is useful to randomize the starting point in the list # but stay identical each time it's called for a server (based on hostname). salt=$(hostname | cksum | cut -d' ' -f1) # Pick an integer between 0 and the length of the SERVERS list # It changes each day item=$(( (today + salt + increment) % list_length )) # cut starts counting fields at 1, not 0. field=$(( item + 1 )) echo "${SERVERS}" | cut -d' ' -f${field} } log() { msg="${1:-$(cat /dev/stdin)}" pid=$$ printf "[%s] %s[%s]: %s\\n" \ "$(/bin/date +"${DATE_FORMAT}")" "${PROGNAME}" "${pid}" "${msg}" \ >> "${LOGFILE}" } error() { msg="${1:-$(cat /dev/stdin)}" pid=$$ printf "[%s] %s[%s]: %s\\n" \ "$(/bin/date +"${DATE_FORMAT}")" "${PROGNAME}" "${pid}" "${msg}" \ >&2 } main() { START_EPOCH=$(/bin/date +%s) log "START GLOBAL - VERSION=${VERSION} LOCAL_TASKS=${LOCAL_TASKS} SYNC_TASKS=${SYNC_TASKS}" # shellcheck disable=SC2174 mkdir -p -m 700 ${LOCAL_BACKUP_DIR} ## Force umask umask 077 ## Initialize variable to store SSH connection errors SERVERS_SSH_ERRORS="" ## Verify other evobackup process and kill if needed if [ -e "${PIDFILE}" ]; then pid=$(cat "${PIDFILE}") # Does process still exist ? if kill -0 "${pid}" 2> /dev/null; then # Killing the childs of evobackup. for ppid in $(pgrep -P "${pid}"); do kill -9 "${ppid}"; done # Then kill the main PID. kill -9 "${pid}" printf "%s is still running (PID %s). Process has been killed" "$0" "${pid}\\n" >&2 else rm -f "${PIDFILE}" fi fi echo "$$" > "${PIDFILE}" # Initialize a list of files to delete at exit # Any file added to the list will also be deleted at exit temp_files="${PIDFILE}" # shellcheck disable=SC2064 trap "rm -f ${temp_files}" EXIT # Update canary to keep track of each run update-evobackup-canary --who "${PROGNAME}" --file "${CANARY_FILE}" if [ "${LOCAL_TASKS}" = "1" ]; then local_tasks fi if [ "${SYNC_TASKS}" = "1" ]; then sync_tasks fi STOP_EPOCH=$(/bin/date +%s) if [ "${SYSTEM}" = "openbsd" ]; then start_time=$(/bin/date -f "%s" -j "${START_EPOCH}" +"${DATE_FORMAT}") stop_time=$(/bin/date -f "%s" -j "${STOP_EPOCH}" +"${DATE_FORMAT}") else start_time=$(/bin/date --date="@${START_EPOCH}" +"${DATE_FORMAT}") stop_time=$(/bin/date --date="@${STOP_EPOCH}" +"${DATE_FORMAT}") fi duration=$(( STOP_EPOCH - START_EPOCH )) log "STOP GLOBAL - start='${start_time}' stop='${stop_time}' duration=${duration}s" tail -20 "${LOGFILE}" | mail -s "[info] EvoBackup - Client ${HOSTNAME}" ${MAIL} } # set all programs to C language (english) export LC_ALL=C # If expansion is attempted on an unset variable or parameter, the shell prints an # error message, and, if not interactive, exits with a non-zero status. set -u # The pipeline's return status is the value of the last (rightmost) command # to exit with a non-zero status, or zero if all commands exit successfully. set -o pipefail # Default return-code (0 == succes) rc=0 # execute main funciton main exit ${rc}