Merge pull request #1 from jan-brinkmann/Add-host-check
Add host check
jan-brinkmann authored Sep 4, 2021
2 parents 5e0e941 + 8da3c46 commit dff874a
Showing 5 changed files with 158 additions and 116 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -1,6 +1,6 @@
FROM ubuntu:18.04

RUN apt-get update && apt-get install -y --no-install-recommends curl cron ca-certificates unzip
RUN apt-get update && apt-get install -y --no-install-recommends curl cron ca-certificates iputils-ping unzip
RUN rm -rf /var/lib/apt/lists/*

# Install awscliv2 https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2-linux.html
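The only functional change here is the addition of iputils-ping, which provides the ping binary the new host check in src/backup.sh relies on. As a quick, informal sanity check (the image tag "dvb-test" and the IP 192.168.0.2 are placeholders, not part of this commit), one could confirm the binary works inside a locally built image:

# Hypothetical smoke test for a locally built image; tag and IP are placeholders.
docker build -t dvb-test .
docker run --rm dvb-test ping -c 1 -W 2 192.168.0.2 && echo "host reachable" || echo "host unreachable"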
1 change: 1 addition & 0 deletions README.md
@@ -203,6 +203,7 @@ Variable | Default | Notes
`BACKUP_WAIT_SECONDS` | `0` | The backup script will sleep this many seconds between re-starting stopped containers, and proceeding with archiving/uploading the backup. This can be useful if you don't want the load/network spike of a large upload immediately after the load/network spike of container startup.
`BACKUP_HOSTNAME` | `$(hostname)` | Name of the host (i.e. Docker container) in which the backup runs. Mostly useful if you want a specific hostname to be associated with backup metrics (see InfluxDB support).
`BACKUP_CUSTOM_LABEL` | | When provided, the [start/stop](#stopping-containers-while-backing-up) and [pre/post exec](#prepost-backup-exec) logic only applies to containers with this custom label.
`CHECK_HOST` | | When provided, the availability of the named host is checked before each backup; typically this is the host the backups are shipped to. If the host answers a ping, the backup runs as usual; otherwise, the backup is skipped. See the run sketch below this table.
`AWS_S3_BUCKET_NAME` | | When provided, the resulting backup file will be uploaded to this S3 bucket after the backup has run.
`AWS_GLACIER_VAULT_NAME` | | When provided, the resulting backup file will be uploaded to this AWS Glacier vault after the backup has run.
`AWS_ACCESS_KEY_ID` | | Required when using `AWS_S3_BUCKET_NAME`.
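For readers skimming the table, a minimal sketch of wiring CHECK_HOST into a one-off docker run invocation could look as follows; the image name and target IP are placeholders, and the full docker-compose example added by this commit appears further down:

# Hypothetical one-off run; "your-backup-image" and 192.168.0.2 are placeholders.
docker run -d \
  -e CHECK_HOST="192.168.0.2" \
  -e BACKUP_CRON_EXPRESSION="0 2 * * *" \
  -v /var/run/docker.sock:/var/run/docker.sock:ro \
  -v grafana-data:/backup/grafana-data:ro \
  -v "$PWD/backups:/archive" \
  your-backup-image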
248 changes: 133 additions & 115 deletions src/backup.sh
@@ -9,140 +9,158 @@ function info {
echo -e "\n$bold[INFO] $1$reset\n"
}

info "Backup starting"
TIME_START="$(date +%s.%N)"
DOCKER_SOCK="/var/run/docker.sock"

if [ ! -z "$BACKUP_CUSTOM_LABEL" ]; then
CUSTOM_LABEL="--filter label=$BACKUP_CUSTOM_LABEL"
fi

if [ -S "$DOCKER_SOCK" ]; then
if [ "$CHECK_HOST" != "false" ]; then
TEMPFILE="$(mktemp)"
docker ps --format "{{.ID}}" --filter "label=docker-volume-backup.stop-during-backup=true" $CUSTOM_LABEL > "$TEMPFILE"
CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
rm "$TEMPFILE"
echo "$CONTAINERS_TOTAL containers running on host in total"
echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup"
ping -c 1 $CHECK_HOST | grep '1 packets transmitted, 1 received' > "$TEMPFILE"
PING_RESULT="$(cat $TEMPFILE)"
if [ ! -z "$PING_RESULT" ]; then
skip="false"
echo "$CHECK_HOST is available."
else
skip="true"
echo "$CHECK_HOST is not available."
info "Backup skipped"
fi
else
CONTAINERS_TO_STOP_TOTAL="0"
CONTAINERS_TOTAL="0"
echo "Cannot access \"$DOCKER_SOCK\", won't look for containers to stop"
skip="false"
fi

if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Stopping containers"
docker stop $CONTAINERS_TO_STOP
fi
if [ "$skip" == "false" ]; then
info "Backup starting"
TIME_START="$(date +%s.%N)"
DOCKER_SOCK="/var/run/docker.sock"

if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps \
--filter "label=docker-volume-backup.exec-pre-backup" $CUSTOM_LABEL \
--format '{{.ID}} {{.Label "docker-volume-backup.exec-pre-backup"}}' \
> "$TEMPFILE"
while read line; do
info "Pre-exec command: $line"
docker exec $line
done < "$TEMPFILE"
rm "$TEMPFILE"
fi
if [ ! -z "$BACKUP_CUSTOM_LABEL" ]; then
CUSTOM_LABEL="--filter label=$BACKUP_CUSTOM_LABEL"
fi

info "Creating backup"
BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
TIME_BACK_UP="$(date +%s.%N)"
tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
BACKUP_SIZE="$(du --bytes $BACKUP_FILENAME | sed 's/\s.*$//')"
TIME_BACKED_UP="$(date +%s.%N)"

if [ ! -z "$GPG_PASSPHRASE" ]; then
info "Encrypting backup"
gpg --symmetric --cipher-algo aes256 --batch --passphrase "$GPG_PASSPHRASE" -o "${BACKUP_FILENAME}.gpg" $BACKUP_FILENAME
rm $BACKUP_FILENAME
BACKUP_FILENAME="${BACKUP_FILENAME}.gpg"
fi
if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps --format "{{.ID}}" --filter "label=docker-volume-backup.stop-during-backup=true" $CUSTOM_LABEL > "$TEMPFILE"
CONTAINERS_TO_STOP="$(cat $TEMPFILE | tr '\n' ' ')"
CONTAINERS_TO_STOP_TOTAL="$(cat $TEMPFILE | wc -l)"
CONTAINERS_TOTAL="$(docker ps --format "{{.ID}}" | wc -l)"
rm "$TEMPFILE"
echo "$CONTAINERS_TOTAL containers running on host in total"
echo "$CONTAINERS_TO_STOP_TOTAL containers marked to be stopped during backup"
else
CONTAINERS_TO_STOP_TOTAL="0"
CONTAINERS_TOTAL="0"
echo "Cannot access \"$DOCKER_SOCK\", won't look for containers to stop"
fi

if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps \
--filter "label=docker-volume-backup.exec-post-backup" $CUSTOM_LABEL \
--format '{{.ID}} {{.Label "docker-volume-backup.exec-post-backup"}}' \
> "$TEMPFILE"
while read line; do
info "Post-exec command: $line"
docker exec $line
done < "$TEMPFILE"
rm "$TEMPFILE"
fi
if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Stopping containers"
docker stop $CONTAINERS_TO_STOP
fi

if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Starting containers back up"
docker start $CONTAINERS_TO_STOP
fi
if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps \
--filter "label=docker-volume-backup.exec-pre-backup" $CUSTOM_LABEL \
--format '{{.ID}} {{.Label "docker-volume-backup.exec-pre-backup"}}' \
> "$TEMPFILE"
while read line; do
info "Pre-exec command: $line"
docker exec $line
done < "$TEMPFILE"
rm "$TEMPFILE"
fi

info "Waiting before processing"
echo "Sleeping $BACKUP_WAIT_SECONDS seconds..."
sleep "$BACKUP_WAIT_SECONDS"

TIME_UPLOAD="0"
TIME_UPLOADED="0"
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to S3"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\""
TIME_UPLOAD="$(date +%s.%N)"
aws $AWS_EXTRA_ARGS s3 cp --only-show-errors "$BACKUP_FILENAME" "s3://$AWS_S3_BUCKET_NAME/"
echo "Upload finished"
TIME_UPLOADED="$(date +%s.%N)"
fi
if [ ! -z "$AWS_GLACIER_VAULT_NAME" ]; then
info "Uploading backup to GLACIER"
echo "Will upload to vault \"$AWS_GLACIER_VAULT_NAME\""
TIME_UPLOAD="$(date +%s.%N)"
aws $AWS_EXTRA_ARGS glacier upload-archive --account-id - --vault-name "$AWS_GLACIER_VAULT_NAME" --body "$BACKUP_FILENAME"
echo "Upload finished"
TIME_UPLOADED="$(date +%s.%N)"
fi
info "Creating backup"
BACKUP_FILENAME="$(date +"${BACKUP_FILENAME:-backup-%Y-%m-%dT%H-%M-%S.tar.gz}")"
TIME_BACK_UP="$(date +%s.%N)"
tar -czvf "$BACKUP_FILENAME" $BACKUP_SOURCES # allow the var to expand, in case we have multiple sources
BACKUP_SIZE="$(du --bytes $BACKUP_FILENAME | sed 's/\s.*$//')"
TIME_BACKED_UP="$(date +%s.%N)"

if [ ! -z "$GPG_PASSPHRASE" ]; then
info "Encrypting backup"
gpg --symmetric --cipher-algo aes256 --batch --passphrase "$GPG_PASSPHRASE" -o "${BACKUP_FILENAME}.gpg" $BACKUP_FILENAME
rm $BACKUP_FILENAME
BACKUP_FILENAME="${BACKUP_FILENAME}.gpg"
fi

if [ -d "$BACKUP_ARCHIVE" ]; then
info "Archiving backup"
mv -v "$BACKUP_FILENAME" "$BACKUP_ARCHIVE/$BACKUP_FILENAME"
if (($BACKUP_UID > 0)); then
chown -v $BACKUP_UID:$BACKUP_GID "$BACKUP_ARCHIVE/$BACKUP_FILENAME"
if [ -S "$DOCKER_SOCK" ]; then
TEMPFILE="$(mktemp)"
docker ps \
--filter "label=docker-volume-backup.exec-post-backup" $CUSTOM_LABEL \
--format '{{.ID}} {{.Label "docker-volume-backup.exec-post-backup"}}' \
> "$TEMPFILE"
while read line; do
info "Post-exec command: $line"
docker exec $line
done < "$TEMPFILE"
rm "$TEMPFILE"
fi
fi

if [ -f "$BACKUP_FILENAME" ]; then
info "Cleaning up"
rm -vf "$BACKUP_FILENAME"
fi
if [ "$CONTAINERS_TO_STOP_TOTAL" != "0" ]; then
info "Starting containers back up"
docker start $CONTAINERS_TO_STOP
fi

info "Waiting before processing"
echo "Sleeping $BACKUP_WAIT_SECONDS seconds..."
sleep "$BACKUP_WAIT_SECONDS"

TIME_UPLOAD="0"
TIME_UPLOADED="0"
if [ ! -z "$AWS_S3_BUCKET_NAME" ]; then
info "Uploading backup to S3"
echo "Will upload to bucket \"$AWS_S3_BUCKET_NAME\""
TIME_UPLOAD="$(date +%s.%N)"
aws $AWS_EXTRA_ARGS s3 cp --only-show-errors "$BACKUP_FILENAME" "s3://$AWS_S3_BUCKET_NAME/"
echo "Upload finished"
TIME_UPLOADED="$(date +%s.%N)"
fi
if [ ! -z "$AWS_GLACIER_VAULT_NAME" ]; then
info "Uploading backup to GLACIER"
echo "Will upload to vault \"$AWS_GLACIER_VAULT_NAME\""
TIME_UPLOAD="$(date +%s.%N)"
aws $AWS_EXTRA_ARGS glacier upload-archive --account-id - --vault-name "$AWS_GLACIER_VAULT_NAME" --body "$BACKUP_FILENAME"
echo "Upload finished"
TIME_UPLOADED="$(date +%s.%N)"
fi

info "Collecting metrics"
TIME_FINISH="$(date +%s.%N)"
INFLUX_LINE="$INFLUXDB_MEASUREMENT\
,host=$BACKUP_HOSTNAME\
if [ -d "$BACKUP_ARCHIVE" ]; then
info "Archiving backup"
mv -v "$BACKUP_FILENAME" "$BACKUP_ARCHIVE/$BACKUP_FILENAME"
if (($BACKUP_UID > 0)); then
chown -v $BACKUP_UID:$BACKUP_GID "$BACKUP_ARCHIVE/$BACKUP_FILENAME"
fi
fi

if [ -f "$BACKUP_FILENAME" ]; then
info "Cleaning up"
rm -vf "$BACKUP_FILENAME"
fi

info "Collecting metrics"
TIME_FINISH="$(date +%s.%N)"
INFLUX_LINE="$INFLUXDB_MEASUREMENT\
,host=$BACKUP_HOSTNAME\
\
size_compressed_bytes=$BACKUP_SIZE\
size_compressed_bytes=$BACKUP_SIZE\
,containers_total=$CONTAINERS_TOTAL\
,containers_stopped=$CONTAINERS_TO_STOP_TOTAL\
,time_wall=$(perl -E "say $TIME_FINISH - $TIME_START")\
,time_total=$(perl -E "say $TIME_FINISH - $TIME_START - $BACKUP_WAIT_SECONDS")\
,time_compress=$(perl -E "say $TIME_BACKED_UP - $TIME_BACK_UP")\
,time_upload=$(perl -E "say $TIME_UPLOADED - $TIME_UPLOAD")\
"
echo "$INFLUX_LINE" | sed 's/ /,/g' | tr , '\n'

if [ ! -z "$INFLUXDB_URL" ]; then
info "Shipping metrics"
curl \
--silent \
--include \
--request POST \
--user "$INFLUXDB_CREDENTIALS" \
"$INFLUXDB_URL/write?db=$INFLUXDB_DB" \
--data-binary "$INFLUX_LINE"
fi
echo "$INFLUX_LINE" | sed 's/ /,/g' | tr , '\n'

if [ ! -z "$INFLUXDB_URL" ]; then
info "Shipping metrics"
curl \
--silent \
--include \
--request POST \
--user "$INFLUXDB_CREDENTIALS" \
"$INFLUXDB_URL/write?db=$INFLUXDB_DB" \
--data-binary "$INFLUX_LINE"
fi

info "Backup finished"
echo "Will wait for next scheduled backup"
info "Backup finished"
fi
echo "Will wait for next scheduled backup"
1 change: 1 addition & 0 deletions src/entrypoint.sh
@@ -22,6 +22,7 @@ INFLUXDB_DB="${INFLUXDB_DB:-}"
INFLUXDB_CREDENTIALS="${INFLUXDB_CREDENTIALS:-}"
INFLUXDB_MEASUREMENT="${INFLUXDB_MEASUREMENT:-docker_volume_backup}"
BACKUP_CUSTOM_LABEL="${BACKUP_CUSTOM_LABEL:-}"
CHECK_HOST="${CHECK_HOST:-"false"}"
EOF
chmod a+x env.sh
source env.sh
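The "false" default written by entrypoint.sh is exactly what the guard at the top of backup.sh compares against, so leaving CHECK_HOST unset disables the check entirely. A minimal sketch of that interaction:

# Sketch of how the entrypoint default feeds the guard in backup.sh.
CHECK_HOST="${CHECK_HOST:-false}"            # unset in the environment -> "false"
if [ "$CHECK_HOST" != "false" ]; then
  echo "host check enabled for $CHECK_HOST"  # ping-based check runs
else
  echo "host check disabled"                 # backup proceeds unconditionally
fi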
22 changes: 22 additions & 0 deletions test/backing-up-check-host/docker-compose.yml
@@ -0,0 +1,22 @@
version: "3"

services:

dashboard:
image: grafana/grafana
ports:
- "3000:3000"
volumes:
- grafana-data:/var/lib/grafana

backup:
build: ../..
environment:
BACKUP_CRON_EXPRESSION: "* * * * *"
CHECK_HOST: "192.168.0.2" # The script pings 192.168.0.2. If the host answers, the backup starts; otherwise it is skipped. You can also provide a hostname that is resolved via DNS.
volumes:
- grafana-data:/backup/grafana-data:ro
- ./backups:/archive

volumes:
grafana-data:
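One possible way to exercise this test case (not prescribed by the commit itself) is to build and start the stack, then watch the backup container's log for "192.168.0.2 is available." or "192.168.0.2 is not available." on the next cron tick:

# Possible way to run the new test case; not part of the commit itself.
cd test/backing-up-check-host
docker-compose up -d --build
docker-compose logs -f backup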
