WIP: separate lib and custom code
All checks were successful
gitea/evobackup/pipeline/head This commit looks good
All checks were successful
gitea/evobackup/pipeline/head This commit looks good
This commit is contained in:
parent
f9aa722ac9
commit
2ea9614e3c
529
client/lib/dump.sh
Normal file
529
client/lib/dump.sh
Normal file
|
@ -0,0 +1,529 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC2034,SC2317
|
||||||
|
|
||||||
|
# List the MySQL databases of the instance listening on the given port,
# excluding the header line and the system schemas.
# $1: MySQL port (optional, default "3306")
# Outputs: one database name per line on stdout.
mysql_list_databases() {
    # Fix: 'port' was previously assigned without 'local' and leaked
    # into the global scope.
    local port=${1:-"3306"}

    mysql --defaults-extra-file=/etc/mysql/debian.cnf --port="${port}" --execute="show databases" --silent --skip-column-names \
        | grep --extended-regexp --invert-match "^(Database|information_schema|performance_schema|sys)"
}
|
||||||
|
|
||||||
|
### BEGIN Dump functions ###
|
||||||
|
|
||||||
|
# Placeholder dump function shipped with the library; prints a fixed
# marker so callers can verify which implementation is in effect.
dump_from_lib() {
    printf '%s\n' "Dump from lib"
}
|
||||||
|
|
||||||
|
# OpenLDAP dump: example implementation based on slapcat.
# Recreates its backup directory, then exports the configuration
# (database 0), the data (database 1) and a combined dump into
# separate LDIF files.
dump_ldap() {
    local backup_dir="${LOCAL_BACKUP_DIR}/ldap"

    rm -rf "${backup_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${backup_dir}"

    log "LOCAL_TASKS - start dump_ldap to ${backup_dir}"

    slapcat -n 0 -l "${backup_dir}/config.bak"
    slapcat -n 1 -l "${backup_dir}/data.bak"
    slapcat -l "${backup_dir}/all.bak"

    log "LOCAL_TASKS - stop dump_ldap"
}
|
||||||
|
# Dump all MySQL databases of the default instance (port 3306) into a
# single compressed file.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_global() {
    local dump_dir="${LOCAL_BACKUP_DIR}/mysql-global"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local error_file="${errors_dir}/mysql.bak.err"
    local dump_file="${dump_dir}/mysql.bak.gz"
    log "LOCAL_TASKS - start ${dump_file}"

    mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 --opt --all-databases --force --events --hex-blob 2> "${error_file}" | gzip --best > "${dump_file}"

    # With 'set -o pipefail' (set in main.sh), a mysqldump failure is visible here
    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - mysqldump to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        # No error: drop the (empty or irrelevant) stderr capture
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"
}
|
||||||
|
# Dump each MySQL database (default instance) into its own compressed file.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_per_base() {
    local dump_dir="${LOCAL_BACKUP_DIR}/mysql-per-base"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    # 'databases' was previously a global; keep it function-scoped
    local databases
    databases=$(mysql_list_databases 3306)
    for database in ${databases}; do
        local error_file="${errors_dir}/${database}.err"
        local dump_file="${dump_dir}/${database}.sql.gz"
        log "LOCAL_TASKS - start ${dump_file}"

        mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --events --hex-blob "${database}" 2> "${error_file}" | gzip --best > "${dump_file}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - mysqldump to ${dump_file} returned an error ${last_rc}" "${error_file}"
            GLOBAL_RC=${E_DUMPFAILED}
        else
            rm -f "${error_file}"
        fi
        log "LOCAL_TASKS - stop ${dump_file}"
    done
}
|
||||||
|
# Dump MySQL meta information: all grants (via pt-show-grants, which
# requires the 'percona-toolkit' package), all global variables, and the
# schema (no data) of every database.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_meta() {
    local dump_dir="${LOCAL_BACKUP_DIR}/mysql-meta"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    ## Dump all grants (requires 'percona-toolkit' package)
    local error_file="${errors_dir}/all_grants.err"
    local dump_file="${dump_dir}/all_grants.sql"
    log "LOCAL_TASKS - start ${dump_file}"

    pt-show-grants --flush --no-header 2> "${error_file}" > "${dump_file}"

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - pt-show-grants to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"

    ## Dump all global variables
    local error_file="${errors_dir}/variables.err"
    local dump_file="${dump_dir}/variables.txt"
    log "LOCAL_TASKS - start ${dump_file}"

    mysql -A -e "SHOW GLOBAL VARIABLES;" 2> "${error_file}" > "${dump_file}"

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - mysql 'show variables' returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"

    ## Schema only (no data) for each database
    local databases
    databases=$(mysql_list_databases 3306)
    for database in ${databases}; do
        local error_file="${errors_dir}/${database}.schema.err"
        local dump_file="${dump_dir}/${database}.schema.sql"
        log "LOCAL_TASKS - start ${dump_file}"

        mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --no-data --databases "${database}" 2> "${error_file}" > "${dump_file}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - mysqldump to ${dump_file} returned an error ${last_rc}" "${error_file}"
            GLOBAL_RC=${E_DUMPFAILED}
        else
            rm -f "${error_file}"
        fi
        log "LOCAL_TASKS - stop ${dump_file}"
    done
}
|
||||||
|
# Dump each MySQL database as tab-separated files, one file per table,
# with mysqldump -T. The target directory must be writable by the MySQL
# server itself, hence the chown.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_tabs() {
    local databases
    databases=$(mysql_list_databases 3306)
    for database in ${databases}; do
        local dump_dir="${LOCAL_BACKUP_DIR}/mysql-tabs/${database}"
        # Declare and assign separately so the helper's exit code is not masked (SC2155)
        local errors_dir
        errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

        rm -rf "${dump_dir}" "${errors_dir}"
        # shellcheck disable=SC2174
        mkdir -p -m 700 "${dump_dir}" "${errors_dir}"
        # mysqld writes the .txt files itself, so it needs ownership
        chown -RL mysql "${dump_dir}"

        local error_file="${errors_dir}.err"
        log "LOCAL_TASKS - start ${dump_dir}"

        # NOTE(review): inside single quotes, --fields-enclosed-by='\"' passes a
        # literal backslash+double-quote to mysqldump — confirm the intended
        # enclosure character is '"' and not '\"'
        mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments --fields-enclosed-by='\"' --fields-terminated-by=',' -T "${dump_dir}" "${database}" 2> "${error_file}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - mysqldump to ${dump_dir} returned an error ${last_rc}" "${error_file}"
            GLOBAL_RC=${E_DUMPFAILED}
        else
            rm -f "${error_file}"
        fi
        log "LOCAL_TASKS - stop ${dump_dir}"
    done
}
|
||||||
|
# Hot-copy a hand-picked list of MySQL databases with mysqlhotcopy
# (MyISAM/ARCHIVE tables only).
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_hotcopy() {
    # customize the list of databases to hot-copy (space separated);
    # empty by default, so the loop below is a no-op until customized
    local databases=""
    for database in ${databases}; do
        local dump_dir="${LOCAL_BACKUP_DIR}/mysql-hotcopy/${database}"
        # Declare and assign separately so the helper's exit code is not masked (SC2155)
        local errors_dir
        errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

        rm -rf "${dump_dir}" "${errors_dir}"
        # shellcheck disable=SC2174
        mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

        local error_file="${errors_dir}.err"
        log "LOCAL_TASKS - start ${dump_dir}"

        mysqlhotcopy "${database}" "${dump_dir}/" 2> "${error_file}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - mysqlhotcopy to ${dump_dir} returned an error ${last_rc}" "${error_file}"
            GLOBAL_RC=${E_DUMPFAILED}
        else
            rm -f "${error_file}"
        fi
        log "LOCAL_TASKS - stop ${dump_dir}"
    done
}
|
||||||
|
# Dump all databases of each additional mysqld instance (one listening
# port per instance), compressed, using credentials from /root/.my.cnf.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mysql_instances() {
    local dump_dir="${LOCAL_BACKUP_DIR}/mysql-instances"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    # Extract the password from the first 'password = ...' line of /root/.my.cnf
    local mysql_user="mysqladmin"
    local mysql_passwd
    mysql_passwd=$(grep -m1 'password = .*' /root/.my.cnf | cut -d " " -f 3)

    # customize list of instances (ports); empty by default, so the loop
    # below is a no-op until customized
    local instances=""
    for instance in ${instances}; do
        local error_file="${errors_dir}/${instance}.err"
        local dump_file="${dump_dir}/${instance}.bak.gz"
        log "LOCAL_TASKS - start ${dump_file}"

        # NOTE(review): --password on the command line is visible in the
        # process list; prefer --defaults-extra-file if possible
        mysqldump --port="${instance}" --opt --all-databases --hex-blob --user="${mysql_user}" --password="${mysql_passwd}" 2> "${error_file}" | gzip --best > "${dump_file}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - mysqldump to ${dump_file} returned an error ${last_rc}" "${error_file}"
            GLOBAL_RC=${E_DUMPFAILED}
        else
            rm -f "${error_file}"
        fi
        log "LOCAL_TASKS - stop ${dump_file}"
    done
}
|
||||||
|
# Dump the whole PostgreSQL cluster with pg_dumpall, compressed.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_postgresql_global() {
    local dump_dir="${LOCAL_BACKUP_DIR}/postgresql-global"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    ## example with pg_dumpall and with compression
    # Fix: error_file was referenced below but never defined in this function
    local error_file="${errors_dir}/pg.dump.bak.err"
    local dump_file="${dump_dir}/pg.dump.bak.gz"
    log "LOCAL_TASKS - start ${dump_file}"

    (sudo -u postgres pg_dumpall) 2> "${error_file}" | gzip --best > "${dump_file}"

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - pg_dumpall to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi

    log "LOCAL_TASKS - stop ${dump_file}"

    ## example with pg_dumpall and without compression
    ## WARNING: you need space in ~postgres
    # local dump_file="${dump_dir}/pg.dump.bak"
    # log "LOCAL_TASKS - start ${dump_file}"
    #
    # (su - postgres -c "pg_dumpall > ~/pg.dump.bak") 2> "${error_file}"
    # mv ~postgres/pg.dump.bak "${dump_file}"
    #
    # log "LOCAL_TASKS - stop ${dump_file}"
}
|
||||||
|
# Dump each PostgreSQL database into its own compressed file.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_postgresql_per_base() {
    local dump_dir="${LOCAL_BACKUP_DIR}/postgresql-per-base"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    # Subshell so the cd does not leak into the rest of the script.
    # NOTE(review): GLOBAL_RC assigned inside this subshell is lost when the
    # subshell exits — confirm whether per-base failures must affect the
    # script's final status
    (
        # shellcheck disable=SC2164
        cd /var/lib/postgresql
        databases=$(sudo -u postgres psql -U postgres -lt | awk -F\| '{print $1}' | grep -v "template.*")
        for database in ${databases} ; do
            local error_file="${errors_dir}/${database}.err"
            local dump_file="${dump_dir}/${database}.sql.gz"
            log "LOCAL_TASKS - start ${dump_file}"

            # NOTE(review): -s dumps schema only (no data) — confirm intended
            (sudo -u postgres /usr/bin/pg_dump --create -s -U postgres -d "${database}") 2> "${error_file}" | gzip --best > "${dump_file}"

            local last_rc=$?
            # shellcheck disable=SC2086
            if [ ${last_rc} -ne 0 ]; then
                log_error "LOCAL_TASKS - pg_dump to ${dump_file} returned an error ${last_rc}" "${error_file}"
                GLOBAL_RC=${E_DUMPFAILED}
            else
                rm -f "${error_file}"
            fi
            log "LOCAL_TASKS - stop ${dump_file}"
        done
    )
}
|
||||||
|
# Dump a filtered subset of PostgreSQL tables. This is a template:
# uncomment and adapt exactly one of the example pg_dump commands below.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_postgresql_filtered() {
    local dump_dir="${LOCAL_BACKUP_DIR}/postgresql-filtered"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local error_file="${errors_dir}/pg-backup.err"
    local dump_file="${dump_dir}/pg-backup.tar"
    log "LOCAL_TASKS - start ${dump_file}"

    ## example with only TABLE1 and TABLE2 from MYBASE (-t selects tables)
    # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f "${dump_file}" -t 'TABLE1' -t 'TABLE2' MYBASE 2> "${error_file}"

    ## example with all tables from MYBASE except TABLE1 and TABLE2 (-T excludes tables)
    # pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f "${dump_file}" -T 'TABLE1' -T 'TABLE2' MYBASE 2> "${error_file}"

    # NOTE(review): while both pg_dump examples stay commented out, last_rc
    # reflects the preceding 'log' call, not an actual dump
    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - pg_dump to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"
}
|
||||||
|
# Copy the RDB file of every Redis instance found under /var/lib/redis*.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_redis() {
    local instances
    instances=$(find /var/lib/ -mindepth 1 -maxdepth 1 -type d -name 'redis*')
    for instance in ${instances}; do
        local name
        name=$(basename "${instance}")
        local dump_dir="${LOCAL_BACKUP_DIR}/${name}"
        # Declare and assign separately so the helper's exit code is not masked (SC2155)
        local errors_dir
        errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

        rm -rf "${dump_dir}" "${errors_dir}"
        # shellcheck disable=SC2174
        mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

        if [ -f "${instance}/dump.rdb" ]; then
            # Fix: previously "${errors_dir}/${instance}.err" embedded the
            # instance's absolute path, pointing into a non-existent sub-tree
            # and breaking the stderr redirection below
            local error_file="${errors_dir}/${name}.err"
            log "LOCAL_TASKS - start ${dump_dir}"

            cp -a "${instance}/dump.rdb" "${dump_dir}/" 2> "${error_file}"

            local last_rc=$?
            # shellcheck disable=SC2086
            if [ ${last_rc} -ne 0 ]; then
                log_error "LOCAL_TASKS - cp ${instance}/dump.rdb to ${dump_dir} returned an error ${last_rc}" "${error_file}"
                GLOBAL_RC=${E_DUMPFAILED}
            else
                rm -f "${error_file}"
            fi
            log "LOCAL_TASKS - stop ${dump_dir}"
        fi
    done
}
|
||||||
|
# Dump all MongoDB databases with mongodump.
## don't forget to create a user with read-only access:
## > use admin
## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_mongodb() {
    local dump_dir="${LOCAL_BACKUP_DIR}/mongodump"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local error_file="${errors_dir}.err"
    log "LOCAL_TASKS - start ${dump_dir}"

    # Credentials must be customized before use
    local mongo_user=""
    local mongo_password=""

    mongodump -u "${mongo_user}" -p"${mongo_password}" -o "${dump_dir}/" 2> "${error_file}" > /dev/null

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - mongodump to ${dump_dir} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_dir}"
}
|
||||||
|
# Save the configuration of MegaRAID controller #0 with megacli.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_megacli_config() {
    local dump_dir="${LOCAL_BACKUP_DIR}/megacli"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local dump_file="${dump_dir}/megacli.cfg"
    local error_file="${errors_dir}/megacli.err"
    log "LOCAL_TASKS - start ${dump_file}"

    megacli -CfgSave -f "${dump_file}" -a0 2> "${error_file}" > /dev/null

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        log_error "LOCAL_TASKS - megacli to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"
}
|
||||||
|
# Record network paths (mtr then traceroute, whichever is installed) to a
# fixed list of targets, as a reference in case of later network trouble.
# Reads: LOCAL_BACKUP_DIR.
dump_traceroute() {
    local dump_dir="${LOCAL_BACKUP_DIR}/traceroute"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local network_targets="8.8.8.8 www.evolix.fr travaux.evolix.net"

    local mtr_bin
    mtr_bin=$(command -v mtr)
    if [ -n "${network_targets}" ] && [ -n "${mtr_bin}" ]; then
        for addr in ${network_targets}; do
            local dump_file="${dump_dir}/mtr-${addr}"
            log "LOCAL_TASKS - start ${dump_file}"

            ${mtr_bin} -r "${addr}" > "${dump_file}"

            log "LOCAL_TASKS - stop ${dump_file}"
        done
    fi

    local traceroute_bin
    traceroute_bin=$(command -v traceroute)
    if [ -n "${network_targets}" ] && [ -n "${traceroute_bin}" ]; then
        for addr in ${network_targets}; do
            local dump_file="${dump_dir}/traceroute-${addr}"
            log "LOCAL_TASKS - start ${dump_file}"

            ${traceroute_bin} -n "${addr}" > "${dump_file}" 2>&1

            log "LOCAL_TASKS - stop ${dump_file}"
        done
    fi
}
|
||||||
|
# Dump a snapshot of the server state with the dump-server-state tool.
# Reads: LOCAL_BACKUP_DIR, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_server_state() {
    local dump_dir="${LOCAL_BACKUP_DIR}/server-state"

    rm -rf "${dump_dir}"
    # Do not create the directory: dump-server-state creates it itself

    log "LOCAL_TASKS - start ${dump_dir}"

    local dump_server_state_bin
    dump_server_state_bin=$(command -v dump-server-state)
    if [ -z "${dump_server_state_bin}" ]; then
        log_error "LOCAL_TASKS - dump-server-state is missing"
        # NOTE(review): rc is set but not read in this function — confirm
        # whether a caller relies on it
        rc=1
    else
        # The former linux and non-linux branches were byte-identical,
        # so a single invocation covers both systems
        ${dump_server_state_bin} --all --dump-dir "${dump_dir}"

        local last_rc=$?
        # shellcheck disable=SC2086
        if [ ${last_rc} -ne 0 ]; then
            log_error "LOCAL_TASKS - dump-server-state returned an error ${last_rc}, check ${dump_dir}"
            GLOBAL_RC=${E_DUMPFAILED}
        fi
    fi
    log "LOCAL_TASKS - stop ${dump_dir}"
}
|
||||||
|
# Export the RabbitMQ configuration with rabbitmqadmin.
# Reads: LOCAL_BACKUP_DIR, LOGFILE, E_DUMPFAILED; writes: GLOBAL_RC on failure.
dump_rabbitmq() {
    local dump_dir="${LOCAL_BACKUP_DIR}/rabbitmq"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    local error_file="${errors_dir}.err"
    local dump_file="${dump_dir}/config"
    log "LOCAL_TASKS - start ${dump_file}"

    rabbitmqadmin export "${dump_file}" 2> "${error_file}" >> "${LOGFILE}"

    local last_rc=$?
    # shellcheck disable=SC2086
    if [ ${last_rc} -ne 0 ]; then
        # Fix: the message previously blamed "pg_dump" (copy-paste leftover)
        log_error "LOCAL_TASKS - rabbitmqadmin export to ${dump_file} returned an error ${last_rc}" "${error_file}"
        GLOBAL_RC=${E_DUMPFAILED}
    else
        rm -f "${error_file}"
    fi
    log "LOCAL_TASKS - stop ${dump_file}"
}
|
||||||
|
# Dump the POSIX file ACLs of the main directory trees with getfacl.
# Reads: LOCAL_BACKUP_DIR.
dump_facl() {
    local dump_dir="${LOCAL_BACKUP_DIR}/facl"
    # Declare and assign separately so the helper's exit code is not masked (SC2155)
    local errors_dir
    errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")

    rm -rf "${dump_dir}" "${errors_dir}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${dump_dir}" "${errors_dir}"

    log "LOCAL_TASKS - start ${dump_dir}"

    # NOTE(review): getfacl stderr is not captured into ${errors_dir}, which
    # is created but left unused here — confirm whether error capture is wanted
    getfacl -R /etc > "${dump_dir}/etc.txt"
    getfacl -R /home > "${dump_dir}/home.txt"
    getfacl -R /usr > "${dump_dir}/usr.txt"
    getfacl -R /var > "${dump_dir}/var.txt"

    log "LOCAL_TASKS - stop ${dump_dir}"
}
|
||||||
|
# Trigger an Elasticsearch snapshot named "snapshot.daily" in the
# "snaprepo" repository: the previous daily snapshot is deleted, then a
# new one is taken synchronously (wait_for_completion=true).
dump_elasticsearch_snapshot() {
    log "LOCAL_TASKS - start dump_elasticsearch_snapshot"

    ## Take a snapshot as a backup.
    ## Warning: You need to have a path.repo configured.
    ## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes

    # Delete yesterday's snapshot, then create today's; curl output goes to the log
    curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" >> "${LOGFILE}"
    curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" >> "${LOGFILE}"

    # Clustered version here.
    # It is basically the same thing, except that you need to check that NFS is mounted.
    # if ss | grep ':nfs' | grep -q 'ip\.add\.res\.s1' && ss | grep ':nfs' | grep -q 'ip\.add\.res\.s2'
    # then
    #     curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" >> "${LOGFILE}"
    #     curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot.daily?wait_for_completion=true" >> "${LOGFILE}"
    # else
    #     echo 'Cannot make a snapshot of elasticsearch, at least one node is not mounting the repository.'
    # fi

    ## If you need to keep older snapshots, for example the last 10 daily snapshots, replace the XDELETE and XPUT lines by:
    # for snapshot in $(curl -s -XGET "localhost:9200/_snapshot/snaprepo/_all?pretty=true" | grep -Eo 'snapshot_[0-9]{4}-[0-9]{2}-[0-9]{2}' | head -n -10); do
    #     curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/${snapshot}" | grep -v -Fx '{"acknowledged":true}'
    # done
    # date=$(/bin/date +%F)
    # curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" >> "${LOGFILE}"

    log "LOCAL_TASKS - stop dump_elasticsearch_snapshot"
}
|
430
client/lib/main.sh
Normal file
430
client/lib/main.sh
Normal file
|
@ -0,0 +1,430 @@
|
||||||
|
#!/bin/bash
|
||||||
|
# shellcheck disable=SC2034,SC2317
|
||||||
|
|
||||||
|
# Library version string (pre-release)
readonly VERSION="23.1-pre"

# set all programs to C language (english)
export LC_ALL=C

# If expansion is attempted on an unset variable or parameter, the shell prints an
# error message, and, if not interactive, exits with a non-zero status.
set -u
# The pipeline's return status is the value of the last (rightmost) command
# to exit with a non-zero status, or zero if all commands exit successfully.
set -o pipefail
|
||||||
|
|
||||||
|
# Default implementation: the calling script is expected to redefine
# local_tasks with its own dump commands; this fallback only logs an error.
local_tasks() {
    log_error "The 'local_tasks' function hasn't been customized"
}
|
||||||
|
# Called from main, it is wrapping the local_tasks function defined in the real script
|
||||||
|
# Wrapper around the user-defined local_tasks function: logs the start and
# the end of the local phase, pruning stale error directories beforehand.
local_tasks_wrapper() {
    log "START LOCAL_TASKS"

    # Drop error directories left over from runs older than 30 days
    find "${LOCAL_BACKUP_DIR}/" -type d -name "${PROGNAME}.errors-*" -ctime +30 -delete

    # local_tasks must be (re)defined by the calling script
    local_tasks

    # TODO: check if this is still needed
    # print_error_files_content

    log "STOP LOCAL_TASKS"
}
|
||||||
|
# Default implementation: the calling script is expected to redefine
# sync_tasks with its own sync invocations; this fallback only logs an error.
sync_tasks() {
    log_error "The 'sync_tasks' function hasn't been customized"
}
|
||||||
|
# Called from main, it is wrapping the sync_tasks function defined in the real script
|
||||||
|
# Wrapper around the user-defined sync_tasks function: prepares the
# per-system default Rsync include list and the default exclude list
# (both readonly afterwards), then calls sync_tasks.
# Reads: SYSTEM, CANARY_FILE. Exits with 1 on an unknown system.
sync_tasks_wrapper() {
    declare -a SERVERS          # Indexed array for server/port values
    declare -a RSYNC_INCLUDES   # Indexed array for includes
    declare -a RSYNC_EXCLUDES   # Indexed array for excludes

    # Default include list depends on the operating system
    case "${SYSTEM}" in
        linux)
            declare -a rsync_default_includes=(
                /bin
                /boot
                /lib
                /opt
                /sbin
                /usr
            )
            ;;
        *bsd)
            declare -a rsync_default_includes=(
                /bin
                /bsd
                /sbin
                /usr
            )
            ;;
        *)
            echo "Unknown system '${SYSTEM}'" >&2
            exit 1
            ;;
    esac
    # Ship the canary file along with the system directories when present
    if [ -f "${CANARY_FILE}" ]; then
        rsync_default_includes+=("${CANARY_FILE}")
    fi
    readonly rsync_default_includes

    # Pseudo-filesystems, caches, sockets, lock/run files and database
    # datadirs (backed up via their dump_* functions instead), for the
    # host and for LXC containers
    declare -a rsync_default_excludes=(
        /dev
        /proc
        /run
        /sys
        /tmp
        /usr/doc
        /usr/obj
        /usr/share/doc
        /usr/src
        /var/apt
        /var/cache
        /var/db/munin/*.tmp
        /var/lib/amavis/amavisd.sock
        /var/lib/amavis/tmp
        /var/lib/clamav/*.tmp
        /var/lib/elasticsearch
        /var/lib/metche
        /var/lib/mongodb
        /var/lib/munin/*tmp*
        /var/lib/mysql
        /var/lib/php/sessions
        /var/lib/php5
        /var/lib/postgres
        /var/lib/postgresql
        /var/lib/sympa
        /var/lock
        /var/run
        /var/spool/postfix
        /var/spool/smtpd
        /var/spool/squid
        /var/state
        /var/tmp
        lost+found
        .nfs.*
        lxc/*/rootfs/tmp
        lxc/*/rootfs/usr/doc
        lxc/*/rootfs/usr/obj
        lxc/*/rootfs/usr/share/doc
        lxc/*/rootfs/usr/src
        lxc/*/rootfs/var/apt
        lxc/*/rootfs/var/cache
        lxc/*/rootfs/var/lib/php5
        lxc/*/rootfs/var/lib/php/sessions
        lxc/*/rootfs/var/lock
        lxc/*/rootfs/var/run
        lxc/*/rootfs/var/state
        lxc/*/rootfs/var/tmp
        /home/mysqltmp
    )
    readonly rsync_default_excludes

    # This function must be defined in the calling script
    sync_tasks
}
|
||||||
|
|
||||||
|
sync() {
|
||||||
|
local sync_name=${1}
|
||||||
|
local -a rsync_servers=("${!2}")
|
||||||
|
local -a rsync_includes=("${!3}")
|
||||||
|
local -a rsync_excludes=("${!4}")
|
||||||
|
|
||||||
|
## Initialize variable to store SSH connection errors
|
||||||
|
declare -a SSH_ERRORS=()
|
||||||
|
|
||||||
|
# echo "### sync ###"
|
||||||
|
|
||||||
|
# for server in "${rsync_servers[@]}"; do
|
||||||
|
# echo "server: ${server}"
|
||||||
|
# done
|
||||||
|
|
||||||
|
# for include in "${rsync_includes[@]}"; do
|
||||||
|
# echo "include: ${include}"
|
||||||
|
# done
|
||||||
|
|
||||||
|
# for exclude in "${rsync_excludes[@]}"; do
|
||||||
|
# echo "exclude: ${exclude}"
|
||||||
|
# done
|
||||||
|
|
||||||
|
local -i n=0
|
||||||
|
local server=""
|
||||||
|
if [ "${SERVERS_FALLBACK}" = "1" ]; then
|
||||||
|
# We try to find a suitable server
|
||||||
|
while :; do
|
||||||
|
server=$(pick_server ${n})
|
||||||
|
test $? = 0 || exit ${E_NOSRVAVAIL}
|
||||||
|
|
||||||
|
if test_server "${server}"; then
|
||||||
|
break
|
||||||
|
else
|
||||||
|
server=""
|
||||||
|
n=$(( n + 1 ))
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
else
|
||||||
|
# we force the server
|
||||||
|
server=$(pick_server "${n}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
rsync_server=$(echo "${server}" | cut -d':' -f1)
|
||||||
|
rsync_port=$(echo "${server}" | cut -d':' -f2)
|
||||||
|
|
||||||
|
log "START SYNC_TASKS - ${sync_name} : server=${server}"
|
||||||
|
|
||||||
|
# Rsync complete log file for the current run
|
||||||
|
RSYNC_LOGFILE="/var/log/${PROGNAME}.${sync_name}.rsync.log"
|
||||||
|
# Rsync stats for the current run
|
||||||
|
RSYNC_STATSFILE="/var/log/${PROGNAME}.${sync_name}.rsync-stats.log"
|
||||||
|
|
||||||
|
# reset Rsync log file
|
||||||
|
if [ -n "$(command -v truncate)" ]; then
|
||||||
|
truncate -s 0 "${RSYNC_LOGFILE}"
|
||||||
|
truncate -s 0 "${RSYNC_STATSFILE}"
|
||||||
|
else
|
||||||
|
printf "" > "${RSYNC_LOGFILE}"
|
||||||
|
printf "" > "${RSYNC_STATSFILE}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${MTREE_ENABLED}" = "1" ]; then
|
||||||
|
mtree_bin=$(command -v mtree)
|
||||||
|
|
||||||
|
if [ -n "${mtree_bin}" ]; then
|
||||||
|
# Dump filesystem stats with mtree
|
||||||
|
log "SYNC_TASKS - start mtree"
|
||||||
|
|
||||||
|
local -a mtree_files=()
|
||||||
|
|
||||||
|
# Loop over Rsync includes
|
||||||
|
|
||||||
|
for i in "${!rsync_includes[@]}"; do
|
||||||
|
include="${rsync_includes[i]}"
|
||||||
|
|
||||||
|
# … but exclude for mtree what will be excluded by Rsync
|
||||||
|
mtree_excludes_file="$(mktemp --tmpdir "${PROGNAME}.${sync_name}.mtree-excludes.XXXXXX")"
|
||||||
|
add_to_temp_files "${mtree_excludes_file}"
|
||||||
|
|
||||||
|
for j in "${!rsync_excludes[@]}"; do
|
||||||
|
echo "${rsync_excludes[j]}" | grep -E "^([^/]|${include})" | sed -e "s|^${include}|.|" >> "${mtree_excludes_file}"
|
||||||
|
done
|
||||||
|
|
||||||
|
mtree_file="/var/log/evobackup.$(basename "${include}").mtree"
|
||||||
|
add_to_temp_files "${mtree_file}"
|
||||||
|
|
||||||
|
${mtree_bin} -x -c -p "${include}" -X "${mtree_excludes_file}" > "${mtree_file}"
|
||||||
|
mtree_files+=("${mtree_file}")
|
||||||
|
done
|
||||||
|
|
||||||
|
if [ "${#mtree_files[@]}" -le 0 ]; then
|
||||||
|
log_error "SYNC_TASKS - ERROR: mtree didn't produce any file"
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "SYNC_TASKS - stop mtree (files: ${mtree_files[*]})"
|
||||||
|
else
|
||||||
|
log "SYNC_TASKS - skip mtree (missing)"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
log "SYNC_TASKS - skip mtree (disabled)"
|
||||||
|
fi
|
||||||
|
|
||||||
|
rsync_bin=$(command -v rsync)
|
||||||
|
# Build the final Rsync command
|
||||||
|
|
||||||
|
# Rsync main options
|
||||||
|
rsync_main_args=()
|
||||||
|
rsync_main_args+=(--archive)
|
||||||
|
rsync_main_args+=(--itemize-changes)
|
||||||
|
rsync_main_args+=(--quiet)
|
||||||
|
rsync_main_args+=(--stats)
|
||||||
|
rsync_main_args+=(--human-readable)
|
||||||
|
rsync_main_args+=(--relative)
|
||||||
|
rsync_main_args+=(--partial)
|
||||||
|
rsync_main_args+=(--delete)
|
||||||
|
rsync_main_args+=(--delete-excluded)
|
||||||
|
rsync_main_args+=(--force)
|
||||||
|
rsync_main_args+=(--ignore-errors)
|
||||||
|
rsync_main_args+=(--log-file "${RSYNC_LOGFILE}")
|
||||||
|
rsync_main_args+=(--rsh "ssh -p ${rsync_port} -o 'ConnectTimeout ${SSH_CONNECT_TIMEOUT}'")
|
||||||
|
|
||||||
|
# Rsync excludes
|
||||||
|
for i in "${!rsync_excludes[@]}"; do
|
||||||
|
rsync_main_args+=(--exclude "${rsync_excludes[i]}")
|
||||||
|
done
|
||||||
|
|
||||||
|
# Rsync local sources
|
||||||
|
rsync_main_args+=("${rsync_includes[@]}")
|
||||||
|
|
||||||
|
# Rsync remote destination
|
||||||
|
rsync_main_args+=("root@${rsync_server}:${REMOTE_BACKUP_DIR}/")
|
||||||
|
|
||||||
|
# … log it
|
||||||
|
log "SYNC_TASKS - ${sync_name} Rsync main command : ${rsync_bin} ${rsync_main_args[*]}"
|
||||||
|
|
||||||
|
# … execute it
|
||||||
|
${rsync_bin} "${rsync_main_args[@]}"
|
||||||
|
|
||||||
|
rsync_main_rc=$?
|
||||||
|
|
||||||
|
# Copy last lines of rsync log to the main log
|
||||||
|
tail -n 30 "${RSYNC_LOGFILE}" >> "${LOGFILE}"
|
||||||
|
# Copy Rsync stats to special file
|
||||||
|
tail -n 30 "${RSYNC_LOGFILE}" | grep --invert-match --extended-regexp " [\<\>ch\.\*]\S{10} " > "${RSYNC_STATSFILE}"
|
||||||
|
|
||||||
|
# We ignore rc=24 (vanished files)
|
||||||
|
if [ ${rsync_main_rc} -ne 0 ] && [ ${rsync_main_rc} -ne 24 ]; then
|
||||||
|
log_error "SYNC_TASKS - ${sync_name} Rsync main command returned an error ${rsync_main_rc}" "${LOGFILE}"
|
||||||
|
GLOBAL_RC=${E_SYNCFAILED}
|
||||||
|
else
|
||||||
|
# Build the report Rsync command
|
||||||
|
local -a rsync_report_args
|
||||||
|
|
||||||
|
rsync_report_args=()
|
||||||
|
|
||||||
|
# Rsync options
|
||||||
|
rsync_report_args+=(--rsh "ssh -p ${rsync_port} -o 'ConnectTimeout ${SSH_CONNECT_TIMEOUT}'")
|
||||||
|
|
||||||
|
# Rsync local sources
|
||||||
|
if [ "${#mtree_files[@]}" -gt 0 ]; then
|
||||||
|
# send mtree files if there is any
|
||||||
|
rsync_report_args+=("${mtree_files[@]}")
|
||||||
|
fi
|
||||||
|
if [ -f "${RSYNC_LOGFILE}" ]; then
|
||||||
|
# send rsync full log file if it exists
|
||||||
|
rsync_report_args+=("${RSYNC_LOGFILE}")
|
||||||
|
fi
|
||||||
|
if [ -f "${RSYNC_STATSFILE}" ]; then
|
||||||
|
# send rsync stats log file if it exists
|
||||||
|
rsync_report_args+=("${RSYNC_STATSFILE}")
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Rsync remote destination
|
||||||
|
rsync_report_args+=("root@${rsync_server}:${REMOTE_LOG_DIR}/")
|
||||||
|
|
||||||
|
# … log it
|
||||||
|
log "SYNC_TASKS - ${sync_name} Rsync report command : ${rsync_bin} ${rsync_report_args[*]}"
|
||||||
|
|
||||||
|
# … execute it
|
||||||
|
${rsync_bin} "${rsync_report_args[@]}"
|
||||||
|
fi
|
||||||
|
|
||||||
|
log "STOP SYNC_TASKS - ${sync_name} server=${server}"
|
||||||
|
}
|
||||||
|
|
||||||
|
# Configure default values for all global variables and prepare the
# runtime environment (PATH, umask, backup/error directories, exit trap).
# Each ': "${VAR:=...}"' line only assigns when the variable is not already
# set, so everything can be overridden before calling setup.
# Globals written: GLOBAL_RC, E_*, PATH, SYSTEM, HOSTNAME, PROGNAME, PIDFILE,
#   LOGFILE, CANARY_FILE, DATE_FORMAT, SERVERS_FALLBACK, SSH_CONNECT_TIMEOUT,
#   LOCAL_BACKUP_DIR, ERRORS_DIR, REMOTE_BACKUP_DIR, REMOTE_LOG_DIR, MAIL,
#   MAIL_SUBJECT, LOCAL_TASKS, SYNC_TASKS, MTREE_ENABLED, TEMP_FILES
setup() {
    # Default return-code (0 == success)
    # -g is required: inside a function, plain "declare" creates a variable
    # local to the function, so GLOBAL_RC would vanish when setup returns
    # and "exit ${GLOBAL_RC}" in main() would expand to nothing.
    declare -g -i GLOBAL_RC=0

    # Possible error codes
    readonly E_NOSRVAVAIL=21 # No server is available
    readonly E_SYNCFAILED=20 # Failed sync task
    readonly E_DUMPFAILED=10 # Failed dump task

    # explicit PATH
    PATH=/sbin:/usr/sbin:/bin:/usr/bin:/usr/local/sbin:/usr/local/bin

    # System name (linux, openbsd…)
    : "${SYSTEM:=$(uname | tr '[:upper:]' '[:lower:]')}"

    # Hostname (for logs and notifications)
    : "${HOSTNAME:=$(hostname)}"

    # Store pid in a file named after this program's name
    : "${PROGNAME:=$(basename "$0")}"
    : "${PIDFILE:="/var/run/${PROGNAME}.pid"}"

    # Customize the log path if you want multiple scripts to have separate log files
    : "${LOGFILE:="/var/log/evobackup.log"}"

    # Canary file to update before executing tasks
    : "${CANARY_FILE:="/zzz_evobackup_canary"}"

    # Date format for log messages
    : "${DATE_FORMAT:="%Y-%m-%d %H:%M:%S"}"

    # Should we fallback on other servers when the first one is unreachable?
    : "${SERVERS_FALLBACK:=1}"
    # timeout (in seconds) for SSH connections
    : "${SSH_CONNECT_TIMEOUT:=90}"

    : "${LOCAL_BACKUP_DIR:="/home/backup"}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${LOCAL_BACKUP_DIR}"

    # NOTE(review): START_TIME is set by main() before setup is called — confirm
    # setup is never invoked on its own, otherwise ERRORS_DIR gets an empty suffix.
    : "${ERRORS_DIR:="${LOCAL_BACKUP_DIR}/${PROGNAME}.errors-${START_TIME}"}"
    # shellcheck disable=SC2174
    mkdir -p -m 700 "${ERRORS_DIR}"

    # Backup directory on remote server
    : "${REMOTE_BACKUP_DIR:="/var/backup"}"
    # Log directory in remote server
    : "${REMOTE_LOG_DIR:="/var/log"}"

    # Email address for notifications
    : "${MAIL:="root"}"

    # Email subject for notifications
    : "${MAIL_SUBJECT:="[info] EvoBackup - Client ${HOSTNAME}"}"

    # Enable/disable local tasks (default: enabled)
    : "${LOCAL_TASKS:=1}"
    # Enable/disable sync tasks (default: enabled)
    : "${SYNC_TASKS:=1}"

    # Enable/disable mtree (default: enabled)
    : "${MTREE_ENABLED:=1}"

    ## Force umask
    umask 077

    # Initialize a list of temporary files
    # -g: keep the array global (same reason as GLOBAL_RC above), so that
    # add_to_temp_files() and the EXIT trap see the same array.
    declare -g -a TEMP_FILES=()
    # Any file in this list will be deleted when the program exits
    trap "clean_temp_files" EXIT
}
|
||||||
|
|
||||||
|
# Entry point: run the whole backup (setup, local tasks, sync tasks),
# log timing information, send the notification email, then exit
# with GLOBAL_RC (0 on success, E_* otherwise).
main() {
    # Start timer (epoch for duration arithmetic, formatted for directory names)
    START_EPOCH=$(/bin/date +%s)
    START_TIME=$(/bin/date +"%Y%m%d%H%M%S")

    # Configure variables and environment
    setup

    # NOTE(review): VERSION, local_tasks_wrapper and sync_tasks_wrapper are
    # not defined in this file — presumably provided by the sourced library
    # (lib/main.sh); confirm before shipping.
    log "START GLOBAL - VERSION=${VERSION} LOCAL_TASKS=${LOCAL_TASKS} SYNC_TASKS=${SYNC_TASKS}"

    # /!\ Only one backup process can run at the same time /!\
    # Based on PID file, kill any running process before continuing
    enforce_single_process "${PIDFILE}"

    # Update canary to keep track of each run
    update-evobackup-canary --who "${PROGNAME}" --file "${CANARY_FILE}"

    if [ "${LOCAL_TASKS}" = "1" ]; then
        local_tasks_wrapper
    fi

    if [ "${SYNC_TASKS}" = "1" ]; then
        sync_tasks_wrapper
    fi

    # Stop timer
    STOP_EPOCH=$(/bin/date +%s)

    # BSD date(1) and GNU date(1) need different flags to format an epoch value
    case "${SYSTEM}" in
        *bsd)
            start_time=$(/bin/date -f "%s" -j "${START_EPOCH}" +"${DATE_FORMAT}")
            stop_time=$(/bin/date -f "%s" -j "${STOP_EPOCH}" +"${DATE_FORMAT}")
            ;;
        *)
            start_time=$(/bin/date --date="@${START_EPOCH}" +"${DATE_FORMAT}")
            stop_time=$(/bin/date --date="@${STOP_EPOCH}" +"${DATE_FORMAT}")
            ;;
    esac
    duration=$(( STOP_EPOCH - START_EPOCH ))

    log "STOP GLOBAL - start='${start_time}' stop='${stop_time}' duration=${duration}s"

    send_mail

    exit ${GLOBAL_RC}
}
|
136
client/lib/utilities.sh
Normal file
136
client/lib/utilities.sh
Normal file
|
@ -0,0 +1,136 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Output a message to the log file
|
||||||
|
# Append a timestamped message to the log file.
# The message comes from $1, or is read from stdin when no argument is given.
# Line format: "[DATE] PROGNAME[PID]: message"
log() {
    local message
    local timestamp

    message="${1:-$(cat /dev/stdin)}"
    timestamp=$(/bin/date +"${DATE_FORMAT}")

    printf "[%s] %s[%s]: %s\n" "${timestamp}" "${PROGNAME}" "$$" "${message}" >> "${LOGFILE}"
}
|
||||||
|
# Print an error message to stderr and record it in the log file.
# Arguments: $1 - error message
#            $2 - (optional) path to an error file whose content is included
# When the error file has more than 30 lines, only the last 30 are shown.
log_error() {
    local error_msg=${1}
    # BUGFIX: was "${2:""}" (substring expansion) — use ":-" for a default value
    local error_file=${2:-}

    if [ -n "${error_file}" ] && [ -f "${error_file}" ]; then
        printf "\n### %s\n" "${error_msg}" >&2
        # BUGFIX: 'wc -l FILE' prints "N FILE"; redirect so only the count is emitted
        if [ "$(wc -l < "${error_file}")" -gt 30 ]; then
            printf "~~~{%s (tail -30)}\n" "${error_file}" >&2
            tail -n 30 "${error_file}" >&2
        else
            printf "~~~{%s}\n" "${error_file}" >&2
            cat "${error_file}" >&2
        fi
        printf "~~~\n" >&2

        log "${error_msg}, check ${error_file}"
    else
        printf "\n### %s\n" "${error_msg}" >&2

        log "${error_msg}"
    fi
}
|
||||||
|
# Register a file for deletion when the program exits (see clean_temp_files).
# Arguments: $1 - path to track
add_to_temp_files() {
    local tracked_file="$1"

    TEMP_FILES+=("${tracked_file}")
}
|
||||||
|
# Delete every temporary file registered with add_to_temp_files().
# Called from the EXIT trap installed by setup().
clean_temp_files() {
    local temp_file

    for temp_file in "${TEMP_FILES[@]}"; do
        rm -f "${temp_file}"
    done
}
|
||||||
|
# Ensure only one instance of the program runs at a time.
# If the PID file points to a live process, kill that process (children
# first) before taking over; otherwise remove the stale PID file.
# The current PID is then written to the file, which is scheduled for
# deletion at exit.
# Arguments: $1 - path to the PID file
enforce_single_process() {
    local pidfile=$1
    # previously leaked to global scope
    local pid
    local child

    if [ -e "${pidfile}" ]; then
        pid=$(cat "${pidfile}")
        # Does process still exist?
        if kill -0 "${pid}" 2> /dev/null; then
            # Kill the children of the running evobackup process first…
            for child in $(pgrep -P "${pid}"); do
                kill -9 "${child}"
            done
            # … then kill the main PID.
            kill -9 "${pid}"
            # BUGFIX: the newline belongs in the format string, not in an argument
            printf "%s is still running (PID %s). Process has been killed\n" "$0" "${pid}" >&2
        else
            # The process is gone: the PID file is stale
            rm -f "${pidfile}"
        fi
    fi
    add_to_temp_files "${pidfile}"

    echo "$$" > "${pidfile}"
}
|
||||||
|
|
||||||
|
# Build the error directory (inside ERRORS_DIR) based on the dump directory path
# Arguments: $1 - path of a dump directory (expected to be under LOCAL_BACKUP_DIR)
# Outputs:   absolute path of the matching errors directory (may not exist yet)
errors_dir_from_dump_dir() {
    local dump_dir=$1
    local relative_path

    # Declaration and assignment are separate so a realpath failure is not
    # masked by the always-zero exit status of "local"
    relative_path=$(realpath --relative-to="${LOCAL_BACKUP_DIR}" "${dump_dir}")

    # return absolute path
    realpath --canonicalize-missing "${ERRORS_DIR}/${relative_path}"
}
|
||||||
|
|
||||||
|
# Call test_server with "HOST:PORT" string
# It will return with 0 if the server is reachable.
# It will return with 1, log a message and append it to SSH_ERRORS if not.
test_server() {
    local item=$1
    local host
    local port
    local new_error

    # split HOST and PORT from the input string
    host=$(echo "${item}" | cut -d':' -f1)
    port=$(echo "${item}" | cut -d':' -f2)

    # Test if the server is accepting connections.
    # Testing the command directly avoids the $?-after-command anti-pattern
    # (previously needed a shellcheck SC2181 suppression).
    if ssh -q -o "ConnectTimeout ${SSH_CONNECT_TIMEOUT}" "${host}" -p "${port}" -t "exit"; then
        # SSH connection is OK
        return 0
    else
        # SSH connection failed
        new_error=$(printf "Failed to connect to \`%s' within %s seconds" "${item}" "${SSH_CONNECT_TIMEOUT}")
        log "${new_error}"
        SSH_ERRORS+=("${new_error}")

        return 1
    fi
}
|
||||||
|
|
||||||
|
# Call pick_server with an optional positive integer to get the nth server in the list.
# The starting point in SERVERS rotates daily but stays stable for a given
# host (day-of-month + hostname-based salt).
# Outputs the selected "HOST:PORT" on stdout; returns 1 (and logs) when the
# increment reaches the end of the SERVERS list.
pick_server() {
    local -i increment=${1:-0}
    local -i list_length=${#SERVERS[@]}
    # previously leaked to global scope
    local new_error
    local today
    local salt
    local -i n
    local i

    if (( increment >= list_length )); then
        # We've reached the end of the list
        new_error="No more server available"
        log "${new_error}"
        SSH_ERRORS+=("${new_error}")

        # Log errors to stderr
        for i in "${!SSH_ERRORS[@]}"; do
            printf "%s\n" "${SSH_ERRORS[i]}" >&2
        done

        return 1
    fi

    # Extract the day of month, without leading 0 (which would give an octal based number)
    today=$(/bin/date +%e)
    # A salt is useful to randomize the starting point in the list
    # but stay identical each time it's called for a server (based on hostname).
    salt=$(hostname | cksum | cut -d' ' -f1)
    # Pick an integer between 0 and the length of the SERVERS list
    # It changes each day
    n=$(( (today + salt + increment) % list_length ))

    echo "${SERVERS[n]}"
}
|
||||||
|
|
||||||
|
# Email the last lines of the log file to the configured recipient.
send_mail() {
    local -i lines_to_send=20

    tail -n "${lines_to_send}" "${LOGFILE}" | mail -s "${MAIL_SUBJECT}" "${MAIL}"
}
|
256
client/zzz_evobackup.sh
Normal file
256
client/zzz_evobackup.sh
Normal file
|
@ -0,0 +1,256 @@
|
||||||
|
#!/bin/bash
|
||||||
|
#
|
||||||
|
# Script Evobackup client
|
||||||
|
# See https://gitea.evolix.org/evolix/evobackup
|
||||||
|
#
|
||||||
|
# Authors: Evolix <info@evolix.fr>,
|
||||||
|
# Gregory Colpart <reg@evolix.fr>,
|
||||||
|
# Romain Dessort <rdessort@evolix.fr>,
|
||||||
|
# Benoit Série <bserie@evolix.fr>,
|
||||||
|
# Tristan Pilat <tpilat@evolix.fr>,
|
||||||
|
# Victor Laborie <vlaborie@evolix.fr>,
|
||||||
|
# Jérémy Lecour <jlecour@evolix.fr>
|
||||||
|
# and others.
|
||||||
|
#
|
||||||
|
# Licence: AGPLv3
|
||||||
|
|
||||||
|
source ./lib/utilities.sh
|
||||||
|
source ./lib/dump.sh
|
||||||
|
source ./lib/main.sh
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
#
|
||||||
|
# You must configure the MAIL variable to receive notifications.
|
||||||
|
#
|
||||||
|
# There is some optional configuration that you can do
|
||||||
|
# at the end of this script.
|
||||||
|
#
|
||||||
|
# The library (usually installed at /usr/local/lib/evobackup/main.sh)
|
||||||
|
# also has many variables that you can override for fine-tuning.
|
||||||
|
#
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
# Email address for notifications
|
||||||
|
MAIL=jdoe@example.com
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
#
|
||||||
|
# The "sync_tasks" function will be called by the main function.
|
||||||
|
#
|
||||||
|
# You can customize the variables:
|
||||||
|
# * "sync_name" (String)
|
||||||
|
# * "SERVERS" (Array of HOST:PORT)
|
||||||
|
# * "RSYNC_INCLUDES" (Array of paths to include)
|
||||||
|
# * "RSYNC_EXCLUDES" (Array of paths to exclude)
|
||||||
|
#
|
||||||
|
# The "sync" function can be called multiple times
|
||||||
|
# with a different set of variables.
|
||||||
|
# That way you can to sync to various destinations.
|
||||||
|
#
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
# Define and run the sync (rsync) tasks.
# Each task sets sync_name/SERVERS/RSYNC_INCLUDES/RSYNC_EXCLUDES then calls
# sync(). NOTE: the array names are passed as "NAME[@]" strings so that
# sync() can expand them indirectly — do not rename these variables.
# NOTE(review): rsync_default_includes/rsync_default_excludes and sync() are
# expected to come from the sourced library — confirm.
sync_tasks() {

    ########## System-only backup (to Evolix servers) #################

    # Name your sync task, for logs
    sync_name="evolix-system"

    # List of host/port for your sync task
    SERVERS=(
        node0.backup.evolix.net:2234
        node1.backup.evolix.net:2234
    )

    # What to include in your sync task
    # shellcheck disable=SC2034
    RSYNC_INCLUDES=(
        "${rsync_default_includes[@]}"
        /etc
        /root
        /var
    )

    # What to exclude from your sync task
    # shellcheck disable=SC2034
    RSYNC_EXCLUDES=(
        "${rsync_default_excludes[@]}"
    )

    # Call the sync task
    sync "${sync_name}" "SERVERS[@]" "RSYNC_INCLUDES[@]" "RSYNC_EXCLUDES[@]"


    ########## Full backup (to client servers) ########################

    # Name your sync task, for logs
    sync_name="client-full"

    # List of host/port for your sync task
    SERVERS=(
        client-backup00.evolix.net:2221
        client-backup01.evolix.net:2221
    )

    # What to include in your sync task
    # shellcheck disable=SC2034
    RSYNC_INCLUDES=(
        "${rsync_default_includes[@]}"
        /etc
        /root
        /var
        /home
        /srv
    )

    # What to exclude from your sync task
    # shellcheck disable=SC2034
    RSYNC_EXCLUDES=(
        "${rsync_default_excludes[@]}"
        /home/foo
    )

    # Call the sync task
    sync "${sync_name}" "SERVERS[@]" "RSYNC_INCLUDES[@]" "RSYNC_EXCLUDES[@]"

}
|
||||||
|
|
||||||
|
#######################################################################
|
||||||
|
#
|
||||||
|
# The "local_tasks" function will be called by the main function.
|
||||||
|
#
|
||||||
|
# You can call any available "dump_xxx" function
|
||||||
|
# (usually installed at /usr/local/lib/evobackup/dump.sh)
|
||||||
|
#
|
||||||
|
# You can also write some custom functions and call them.
|
||||||
|
# A "dump_custom" example is available further down.
|
||||||
|
#
|
||||||
|
#######################################################################
|
||||||
|
|
||||||
|
# Run the local dump tasks.
# Uncomment the dump_* calls you need; the functions are provided by the
# sourced dump library (usually /usr/local/lib/evobackup/dump.sh).
local_tasks() {

    ########## OpenLDAP ###############

    ### dump_ldap

    ########## MySQL ##################

    # Dump all grants (permissions), config variables and schema of databases
    ### dump_mysql_meta

    # Dump all databases in a single compressed file
    ### dump_mysql_global

    # Dump each database separately, in a compressed file
    ### dump_mysql_per_base

    # Dump multiples instances, each in a single compressed file
    ### dump_mysql_instances

    # Dump each table in schema/data files, for all databases
    ### dump_mysql_tabs

    # Run mysqlhotcopy for a specific database (must be configured)
    # dump_mysql_hotcopy

    ########## PostgreSQL #############

    # Dump all databases in a single file (compressed or not)
    ### dump_postgresql_global

    # Dump a specific database with only some tables, or all but some tables (must be configured)
    ### dump_postgresql_filtered

    # Dump each database separately, in a compressed file
    ### dump_postgresql_per_base

    ########## MongoDB ################

    ### dump_mongodb

    ########## Redis ##################

    # Copy data file for all instances
    ### dump_redis

    ########## ElasticSearch ##########

    # Trigger snapshots (must be configured)
    ### dump_elasticsearch_snapshot

    ########## RabbitMQ ###############

    ### dump_rabbitmq

    ########## MegaCli ################

    # Copy RAID config
    ### dump_megacli_config

    ########## Network ################

    # Dump network routes with mtr and traceroute (warning: could be long with aggressive firewalls)
    ### dump_traceroute

    ########## Server state ###########

    # Run dump-server-state to extract system information
    ### dump_server_state

    # Dump file access control lists
    ### dump_facl

    # No-op, in case nothing is enabled
    :
}
|
||||||
|
|
||||||
|
# This is an example for a custom dump function
|
||||||
|
# Uncomment, customize and call it from the "local_tasks" function
|
||||||
|
### dump_custom() {
|
||||||
|
### # Set dump and errors directories and files
|
||||||
|
### local dump_dir="${LOCAL_BACKUP_DIR}/custom"
|
||||||
|
### local dump_file="${dump_dir}/dump.gz"
|
||||||
|
### local errors_dir=$(errors_dir_from_dump_dir "${dump_dir}")
|
||||||
|
### local error_file="${errors_dir}/dump.err"
|
||||||
|
###
|
||||||
|
### # Reset dump and errors directories
|
||||||
|
### rm -rf "${dump_dir}" "${errors_dir}"
|
||||||
|
### # shellcheck disable=SC2174
|
||||||
|
### mkdir -p -m 700 "${dump_dir}" "${errors_dir}"
|
||||||
|
###
|
||||||
|
### # Log the start of the command
|
||||||
|
### log "LOCAL_TASKS - start ${dump_file}"
|
||||||
|
###
|
||||||
|
### # Execute your dump command
|
||||||
|
### # Send errors to the error file and the data to the dump file
|
||||||
|
### my-dump-command 2> "${error_file}" > "${dump_file}"
|
||||||
|
###
|
||||||
|
### # Check result and deal with potential errors
|
||||||
|
### local last_rc=$?
|
||||||
|
### # shellcheck disable=SC2086
|
||||||
|
### if [ ${last_rc} -ne 0 ]; then
|
||||||
|
### log_error "LOCAL_TASKS - my-dump-command to ${dump_file} returned an error ${last_rc}" "${error_file}"
|
||||||
|
### GLOBAL_RC=${E_DUMPFAILED}
|
||||||
|
### else
|
||||||
|
### rm -f "${error_file}"
|
||||||
|
### fi
|
||||||
|
###
|
||||||
|
### # Log the end of the command
|
||||||
|
### log "LOCAL_TASKS - stop ${dump_file}"
|
||||||
|
### }
|
||||||
|
|
||||||
|
########## Optional configuration #####################################
|
||||||
|
|
||||||
|
# If you set a value (like "linux", "openbsd"…) it will be used,
|
||||||
|
# Default: uname(1) in lowercase.
|
||||||
|
### SYSTEM="linux"
|
||||||
|
|
||||||
|
# If you set a value it will be used,
|
||||||
|
# Default: hostname(1).
|
||||||
|
### HOSTNAME="example-host"
|
||||||
|
|
||||||
|
# Email subject for notifications
|
||||||
|
### MAIL_SUBJECT="[info] EvoBackup - Client ${HOSTNAME}"
|
||||||
|
|
||||||
|
# Call main function
|
||||||
|
main
|
Loading…
Reference in a new issue