/usr/bin/ikiwiki-hosting-web-backup is in ikiwiki-hosting-web 0.20170622ubuntu1.
This file is owned by root:root, with mode 0755.
The actual contents of the file can be viewed below.
#!/bin/bash
# Backs up all sites.
failed=""
. /etc/ikiwiki-hosting/ikiwiki-hosting.conf
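# (The sourced config is expected to provide the settings used below:
# num_backups, backup_rsync_urls, backup_rsync_options, backup_ssh_cache
# and morguedir.)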
LOCKFILE=/var/run/ikiwiki-hosting-web-backup-lockfile
# Use lockfile to avoid multiple jobs running.
# (bash needed because exec 200>file is a bashism)
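# (The lock is tied to fd 200, which stays open for the lifetime of the
# script, so the lock is released automatically when the script exits.)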
exec 200>"$LOCKFILE"
if ! flock --nonblock 200; then
echo "another ikiwiki-hosting-web-backup is already running" >&2
exit 1
fi
trap cleanup EXIT INT
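# (bash resolves a trap handler by name when the trap fires, so it is
# fine that cleanup is only defined just below.)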
cleanup () {
    rm -f "$LOCKFILE" || true
    if [ -n "$backup_ssh_cache" ]; then
        kill %1 # stop any backgrounded ssh master process
    fi
}
# start master process for ssh connection caching
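# (-M makes this the multiplexing master, -N runs no remote command and
# -n detaches stdin; the rsyncs below can reuse this connection if a
# matching ControlPath is configured in ssh_config.)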
if [ -n "$backup_ssh_cache" ]; then
    ssh -nMN "$backup_ssh_cache" &
fi
for site in $(ikisite list); do
    if ( [ -n "$num_backups" ] && [ "$num_backups" -gt 0 ] ) || [ -n "$backup_rsync_urls" ]; then
        bdir="/var/backups/ikiwiki-hosting-web/$site"
        mkdir -p "$bdir"
        # savelog has a minimum -c of 2
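        # (savelog, from debianutils, rotates backup to backup.0,
        # backup.0 to backup.1, and so on, discarding copies beyond -c.)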
        if [ -e "$bdir/backup" ] && [ -n "$num_backups" ] && [ "$num_backups" -gt 2 ]; then
            savelog -c "$num_backups" "$bdir/backup"
        fi
        if ! ikisite backup "$site" --filename="$bdir/backup"; then
            echo "ikisite backup $site failed!" >&2
            failed=1
        fi
        # rsync backups to somewhere
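        # ("$url$site" is plain concatenation, so each configured url is
        # presumably expected to end in a separator, e.g. "user@host:dir/".)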
if [ -n "$backup_rsync_urls" ]; then
for url in $backup_rsync_urls; do
if ! rsync -az $backup_rsync_options "$bdir/backup" "$url$site"; then
failed=1
fi
done
fi
        # maybe we don't want to keep backups locally..
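        # (in that case the local copy only existed so it could be rsynced
        # above)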
        if [ -z "$num_backups" ] || [ "$num_backups" = 0 ]; then
            rm -f "$bdir/backup"
        fi
        # delete any obsolete version of the site in the morgue
        # (might exist if it got deleted and then recreated)
        if [ -n "$morguedir" ]; then
            rm -f "$morguedir/$site.backup"
        fi
    fi
done
if [ -n "$morguedir" ] && [ -d "$morguedir" ] && [ -n "$backup_rsync_urls" ]; then
    # backup the morgue to any configured rsync urls.
    for url in $backup_rsync_urls; do
        if ! rsync -az $backup_rsync_options "$morguedir/" "${url}morgue/"; then
            failed=1
        fi
    done
    # For each site in the morgue, zero out any old backup
    # of it that might exist on the remote. This is done to avoid
    # deleted sites being restored if the backups are ever used.
    # (We can't properly delete them the way that we're using rsync.)
    for file in $(find "$morguedir" -type f); do
        site="$(basename "$file" | sed 's/\.backup$//')"
        touch "$morguedir/empty"
        for url in $backup_rsync_urls; do
            if ! rsync -a $backup_rsync_options "$morguedir/empty" "$url$site"; then
                failed=1
            fi
        done
        rm -f "$morguedir/empty"
    done
fi
if [ "$failed" ]; then
exit 1
else
exit 0
fi
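
The script takes all of its tunables from /etc/ikiwiki-hosting/ikiwiki-hosting.conf,
sourced at the top. As a minimal sketch with purely hypothetical values (only the
variable names come from the script above; the paths and values here are
assumptions), the relevant settings might look like:

# /etc/ikiwiki-hosting/ikiwiki-hosting.conf (hypothetical example)
num_backups=7                                      # rotated backups to keep locally; empty or 0 keeps none
backup_rsync_urls="backup@example.com:backups/"    # space-separated rsync destinations; the site name is appended
backup_rsync_options="--bwlimit=1000"              # extra options passed through to rsync
backup_ssh_cache="backup@example.com"              # host to hold an open ssh master connection to
morguedir=/var/backups/ikiwiki-hosting-web/morgue  # where backups of deleted sites are kept (path is an assumption)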