Merge branch 'master' into debian

Victor LABORIE 2020-03-05 14:51:09 +01:00
commit f861410597
4 changed files with 71 additions and 47 deletions


@ -16,16 +16,16 @@ The chroot jails depend on these packages
~~~
apt install \
bash \
coreutils \
sed \
dash \
mount \
rsync \
openssh-server \
openssh-sftp-server \
libc6-i386 \
libc6
~~~
## Client dependencies
@ -37,11 +37,13 @@ The clients only require OpenSSH and rsync.
Edit the root crontab
~~~
- # crontab -e
- 30 10 * * * /usr/sbin/bkctld inc && /usr/sbin/bkctld rm
+ # $editor /etc/cron.d/bkctld
+ MAILTO=alert4@evolix.net
+ 30 11 * * * root /usr/sbin/bkctld inc && /usr/sbin/bkctld rm
+ 30 23 * * * root /usr/share/scripts/check-incs.sh 1> /dev/null
~~~
## Notes
If you want multiple backups per day (at most one per hour), you can run
`bkctld inc` multiple times; if you want to keep incremental backups
**forever**, just don't run `bkctld rm`.
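
For example, a minimal `/etc/cron.d/bkctld` sketch for hourly incrementals (reusing the MAILTO and paths from the example above; the exact times are only an illustration):

~~~
MAILTO=alert4@evolix.net
# one incremental per hour (bkctld skips an inc that already exists)
0 * * * * root /usr/sbin/bkctld inc
# purge expired incrementals once a day; drop this line to keep them forever
30 10 * * * root /usr/sbin/bkctld rm
~~~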


@ -15,11 +15,22 @@ for jail in $("${LIBDIR}/bkctld-list"); do
        jail_inode=$(stat --format=%i "${JAILDIR}/${jail}")
        if [ "$jail_inode" -eq 256 ]; then
            /bin/btrfs subvolume snapshot -r "${JAILDIR}/${jail}" "${inc}" | debug
+           end=$(date +"%H:%M:%S")
+           notice "${jail} : made ${date} inc [${start}/${end}]"
        else
-           cp -alx "${JAILDIR}/${jail}/" "${inc}" | debug
+           lock="/run/lock/bkctld/inc-${jail}.lock"
+           if [ -f "${lock}" ]; then
+               warning "${jail} : trying to run already running inc"
+           else
+               (
+                   mkdir -p /run/lock/bkctld && touch "${lock}"
+                   trap "rm -f ${lock}" 0
+                   cp -alx "${JAILDIR}/${jail}/" "${inc}"
+                   end=$(date +"%H:%M:%S")
+                   notice "${jail} : made ${date} inc [${start}/${end}]"
+               ) &
+           fi
        fi
-       end=$(date +"%H:%M:%S")
-       notice "${jail} : made ${date} inc [${start}/${end}]"
    else
        warning "${jail} : trying to made already existant inc"
    fi
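
The copy branch above relies on a simple lock-file + background-subshell pattern; a standalone sketch of the same idea (paths and the `sleep` workload are placeholders, not bkctld code):

~~~
#!/bin/sh
# Lock + background subshell sketch: skip if a previous run is still going,
# otherwise do the work in the background and clean the lock up via trap.
lock="/run/lock/example/long-task.lock"

if [ -f "${lock}" ]; then
    echo "previous run still in progress, skipping" >&2
else
    (
        mkdir -p "$(dirname "${lock}")" && touch "${lock}"
        # the trap removes the lock when the subshell exits, even on failure
        trap 'rm -f "${lock}"' 0
        sleep 30    # stand-in for the long-running work (e.g. cp -alx ...)
    ) &    # backgrounded so the calling loop can move on to the next item
fi
~~~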


@ -6,19 +6,6 @@
LIBDIR="$(dirname $0)" && . "${LIBDIR}/config"
-empty="/tmp/bkctld-${$}-$(date +%N))"
-mkdir "${empty}"
-pidfile="/var/run/bkctld-rm.pid"
-if [ -f "${pidfile}" ]; then
-    pid=$(cat "${pidfile}")
-    ps -u "${pid}" >/dev/null
-    if [ "${?}" -eq 0 ]; then
-        kill -9 "${pid}"
-        warning "${0} rm always run (PID ${pid}), killed by ${$} !"
-    fi
-    rm "${pidfile}"
-fi
-echo "${$}" > "${pidfile}"
for jail in $("${LIBDIR}/bkctld-list"); do
    incs=$(ls "${INCDIR}/${jail}")
    if [ -f "${CONFDIR}/${jail}" ]; then
@ -33,16 +20,25 @@ for jail in $("${LIBDIR}/bkctld-list"); do
            inc_inode=$(stat --format=%i "${INCDIR}/${jail}/${j}")
            if [ "${inc_inode}" -eq 256 ]; then
                /bin/btrfs subvolume delete "${INCDIR}/${jail}/${j}" | debug
+               end=$(date +"%H:%M:%S")
+               notice "${jail} : deleted ${j} inc [${start}/${end}]"
            else
-               cd "${INCDIR}/${jail}"
-               rsync -a --delete "${empty}/" "${j}/"
-               rmdir "${j}"
+               lock="/run/lock/bkctld/rm-${jail}.lock"
+               if [ -f "${lock}" ]; then
+                   warning "${jail} : trying to run already running rm"
+               else
+                   (
+                       empty="/tmp/bkctld-${$}-$(date +%N)"
+                       mkdir -p /run/lock/bkctld && touch "${lock}" && mkdir -p "${empty}"
+                       trap "rm -f ${lock} && rmdir ${empty}" 0
+                       rsync -a --delete "${empty}/" "${INCDIR}/${jail}/${j}/"
+                       rmdir "${j}"
+                       end=$(date +"%H:%M:%S")
+                       notice "${jail} : deleted ${j} inc [${start}/${end}]"
+                   ) &
+               fi
            fi
-           end=$(date +"%H:%M:%S")
-           notice "${jail} : deleted ${j} inc [${start}/${end}]"
        done
        rm "${keepfile}"
    fi
done
-rmdir "${empty}"
-rm "${pidfile}"


@ -141,7 +141,7 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
## OpenLDAP : example with slapcat
# slapcat -l ${LOCAL_BACKUP_DIR}/ldap.bak
- ### MySQL
+ ## MySQL
## example with global and compressed mysqldump
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 \
@ -154,7 +154,14 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
# mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 -Q --opt --events --hex-blob --skip-comments \
# --fields-enclosed-by='\"' --fields-terminated-by=',' -T /home/mysqldump/$i $i; done
- ## example with compressed SQL dump for each databases
+ ## example with SQL dump (schema only, no data) for each databases
+ # mkdir -p -m 700 /home/mysqldump/
+ # for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
+ # | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
+ # mysqldump --defaults-extra-file=/etc/mysql/debian.cnf --force -P 3306 --no-data --databases $i > /home/mysqldump/${i}.schema.sql
+ # done
+ ## example with compressed SQL dump (with data) for each databases
# mkdir -p -m 700 /home/mysqldump/
# for i in $(mysql --defaults-extra-file=/etc/mysql/debian.cnf -P 3306 -e 'show databases' -s --skip-column-names \
# | egrep -v "^(Database|information_schema|performance_schema|sys)"); do
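
The schema-only files above are produced with `--databases`, so each dump already contains its `CREATE DATABASE`/`USE` statements and can be restored as-is (file name is an example):

~~~
mysql --defaults-extra-file=/etc/mysql/debian.cnf < /home/mysqldump/mydb.schema.sql
~~~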
@ -181,7 +188,7 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
# fi
# done
- ### PostgreSQL
+ ## PostgreSQL
## example with pg_dumpall (warning: you need space in ~postgres)
# su - postgres -c "pg_dumpall > ~/pg.dump.bak"
@ -197,7 +204,8 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
## example with only TABLE1 and TABLE2 from MYBASE
# pg_dump -p 5432 -h 127.0.0.1 -U USER --clean -F t --inserts -f ${LOCAL_BACKUP_DIR}/pg-backup.tar -T 'TABLE1' -T 'TABLE2' MYBASE
- ## MongoDB : example with mongodump
+ ## MongoDB
## don't forget to create use with read-only access
## > use admin
## > db.createUser( { user: "mongobackup", pwd: "PASS", roles: [ "backup", ] } )
@ -208,10 +216,14 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
# echo "Error with mongodump!"
# fi
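
The mongodump invocation itself is outside this hunk; a typical call with the read-only user created above might look like this (a sketch, not the script's actual line):

~~~
# sketch only: user/password and output directory are assumptions
mongodump -u mongobackup -p PASS --authenticationDatabase admin \
    -o ${LOCAL_BACKUP_DIR}/mongodump/ > /dev/null 2>&1
~~~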
- ## Redis : example with copy .rdb file
+ ## Redis
+ ## example with copy .rdb file
# cp /var/lib/redis/dump.rdb ${LOCAL_BACKUP_DIR}/
- ## ElasticSearch, take a snapshot as a backup.
+ ## ElasticSearch
+ ## Take a snapshot as a backup.
## Warning: You need to have a path.repo configured.
## See: https://wiki.evolix.org/HowtoElasticsearch#snapshots-et-sauvegardes
# curl -s -XDELETE "localhost:9200/_snapshot/snaprepo/snapshot.daily" -o /tmp/es_delete_snapshot.daily.log
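
For reference, the `snaprepo` repository used above has to be registered once; a sketch (the `location` is an example and must be listed under `path.repo` in elasticsearch.yml):

~~~
curl -s -XPUT "localhost:9200/_snapshot/snaprepo" -H 'Content-Type: application/json' \
    -d '{"type": "fs", "settings": {"location": "/home/backup/elasticsearch"}}'
~~~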
@ -232,10 +244,13 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
# date=$(date +%F)
# curl -s -XPUT "localhost:9200/_snapshot/snaprepo/snapshot_${date}?wait_for_completion=true" -o /tmp/es_snapshot_${date}.log
- ## RabbitMQ : export config
+ ## RabbitMQ
+ ## export config
#rabbitmqadmin export ${LOCAL_BACKUP_DIR}/rabbitmq.config >> $LOGFILE
- # backup MegaCli config
+ ## MegaCli config
#megacli -CfgSave -f ${LOCAL_BACKUP_DIR}/megacli_conf.dump -a0 >/dev/null
## Dump system and kernel versions
@ -260,10 +275,10 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
apt-cache dumpavail > ${LOCAL_BACKUP_DIR}/packages.available
## Dump MBR / table partitions
- disks=$(lsblk -l | grep disk | grep -v drbd | awk '{print $1}')
+ disks=$(lsblk -l | grep disk | grep -v -E '(drbd|fd[0-9]+)' | awk '{print $1}')
for disk in ${disks}; do
dd if="/dev/${disk}" of="${LOCAL_BACKUP_DIR}/MBR-${disk}" bs=512 count=1 2>&1 | grep -Ev "(records in|records out|512 bytes)"
- fdisk -l "/dev/${disk}" > "${LOCAL_BACKUP_DIR}/partitions-${disk}"
+ fdisk -l "/dev/${disk}" > "${LOCAL_BACKUP_DIR}/partitions-${disk}" 2>&1
done
cat ${LOCAL_BACKUP_DIR}/partitions-* > ${LOCAL_BACKUP_DIR}/partitions
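
If a partition table is lost, the 512-byte images above can be written back with `dd` (disk name is an example; only meaningful for MBR/DOS-partitioned disks, and the target device must be triple-checked):

~~~
dd if=${LOCAL_BACKUP_DIR}/MBR-sda of=/dev/sda bs=512 count=1
~~~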
@ -288,7 +303,7 @@ if [ "${LOCAL_TASKS}" = "1" ]; then
##disklabel sd0 > ${LOCAL_BACKUP_DIR}/partitions
## Dump pf infos
- pfctl -sa |> ${LOCAL_BACKUP_DIR}/pfctl-sa.txt
+ pfctl -sa > ${LOCAL_BACKUP_DIR}/pfctl-sa.txt
fi