@@ -279,14 +279,15 @@ backup_redis() {
pre_dbbackup
print_notice "Dumping Redis - Flushing Redis Cache First"
target=redis_all_${DB_HOST,,}_${now}.rdb
- echo bgsave | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
+ echo bgsave | silent redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} --rdb ${TEMP_LOCATION}/${target} ${EXTRA_OPTS}
sleep 10
try=5
while [ $try -gt 0 ] ; do
saved=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_bgsave_in_progress:0/{print "saved"}')
ok=$(echo 'info Persistence' | redis-cli -h ${DB_HOST} -p ${DB_PORT} ${REDIS_PASS_STR} | awk '/rdb_last_bgsave_status:ok/{print "ok"}')
if [[ "$saved" = "saved" ]] && [[ "$ok" = "ok" ]]; then
print_notice "Redis Backup Complete"
+ exit_code=0
break
fi
try=$((try - 1))
@@ -296,6 +297,7 @@ backup_redis() {
target_original=${target}
compression
$compress_cmd "${TEMP_LOCATION}/${target_original}"
+ check_exit_code $target
generate_checksum
move_dbbackup
post_dbbackup all
@@ -430,11 +432,31 @@ check_exit_code() {
cleanup_old_data() {
if [ -n "${DB_CLEANUP_TIME}" ]; then
if [ "${master_exit_code}" != 1 ]; then
- print_info "Cleaning up old backups"
- mkdir -p "${DB_DUMP_TARGET}"
- find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+ case "${BACKUP_LOCATION,,}" in
+ "file" | "filesystem" )
+ print_info "Cleaning up old backups"
+ mkdir -p "${DB_DUMP_TARGET}"
+ find "${DB_DUMP_TARGET}"/ -mmin +"${DB_CLEANUP_TIME}" -iname "*" -exec rm {} \;
+ ;;
+ "s3" | "minio" )
+ print_info "Cleaning up old backups"
+ aws ${PARAM_AWS_ENDPOINT_URL} s3 ls s3://${S3_BUCKET}/${S3_PATH} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS} | grep " DIR " -v | while read -r s3_file; do
+ s3_createdate=$(echo $s3_file | awk {'print $1" "$2'})
+ s3_createdate=$(date -d "$s3_createdate" "+%s")
+ s3_olderthan=$(echo $(( $(date +%s)-${DB_CLEANUP_TIME}*60 )))
+ if [[ $s3_createdate -le $s3_olderthan ]] ; then
+ s3_filename=$(echo $s3_file | awk {'print $4'})
+ if [ $s3_filename != "" ] ; then
+ print_debug "Deleting $s3_filename"
+ silent aws ${PARAM_AWS_ENDPOINT_URL} s3 rm s3://${S3_BUCKET}/${S3_PATH}/${s3_filename} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+ fi
+ fi
+
+ done
+ ;;
+ esac
else
- print_info "Skipping Cleaning up old backups because there were errors in backing up"
+ print_error "Skipping Cleaning up old backups because there were errors in backing up"
fi
fi
}
@@ -446,31 +468,31 @@ compression() {

case "${COMPRESSION,,}" in
gz* )
- compress_cmd="pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+ compress_cmd="silent pigz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="gzip"
extension=".gz"
dir_compress_cmd=${compress_cmd}
target_dir=${target}
target=${target}.gz
;;
bz* )
- compress_cmd="pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
+ compress_cmd="silent pbzip2 -${COMPRESSION_LEVEL} -p${PARALLEL_COMPRESSION_THREADS} "
compression_type="bzip2"
dir_compress_cmd=${compress_cmd}
extension=".bz2"
target_dir=${target}
target=${target}.bz2
;;
xz* )
- compress_cmd="pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
+ compress_cmd="silent pixz -${COMPRESSION_LEVEL} -p ${PARALLEL_COMPRESSION_THREADS} "
compression_type="xzip"
dir_compress_cmd=${compress_cmd}
extension=".xz"
target_dir=${target}
target=${target}.xz
;;
zst* )
- compress_cmd="zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
+ compress_cmd="silent zstd --rm -${COMPRESSION_LEVEL} -T${PARALLEL_COMPRESSION_THREADS} "
compression_type="zstd"
dir_compress_cmd=${compress_cmd}
extension=".zst"
@@ -506,12 +528,12 @@ create_archive() {
print_notice "Creating archive file of '${target_dir}' with tar ${compresion_string}"
tar cf - "${TEMP_LOCATION}"/"${target_dir}" | $dir_compress_cmd > "${TEMP_LOCATION}"/"${target_dir}".tar"${extension}"
else
- print_warn "Skipping creating archive file because backup did not complete successfully"
+ print_error "Skipping creating archive file because backup did not complete successfully"
fi
}

generate_checksum() {
- if var_true "${ENABLE_CHECKSUM}" ;then
+ if var_true "${ENABLE_CHECKSUM}" ; then
if [ "${exit_code}" = "0" ] ; then
case "${CHECKSUM,,}" in
"md5" )
@@ -530,13 +552,16 @@ generate_checksum() {
checksum_value=$(${checksum_command} "${target}" | awk ' { print $1}')
print_debug "${checksum_extension^^}: ${checksum_value} - ${target}"
else
- print_warn "Skipping Checksum creation because backup did not complete successfully"
+ print_error "Skipping Checksum creation because backup did not complete successfully"
fi
fi
}

move_dbbackup() {
if [ "${exit_code}" = "0" ] ; then
+ dbbackup_size="$(stat -c%s "${TEMP_LOCATION}"/"${target}")"
+ dbbackup_date="$(date -r "${TEMP_LOCATION}"/"${target}" +'%s')"
+
case "${SIZE_VALUE,,}" in
"b" | "bytes" )
SIZE_VALUE=1
@@ -560,9 +585,7 @@ move_dbbackup() {
"file" | "filesystem" )
print_debug "Moving backup to filesystem"
mkdir -p "${DB_DUMP_TARGET}"
- if var_true "${ENABLE_CHECKSUM}" ;then
- mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
- fi
+ mv "${TEMP_LOCATION}"/*."${checksum_extension}" "${DB_DUMP_TARGET}"/
mv "${TEMP_LOCATION}"/"${target}" "${DB_DUMP_TARGET}"/"${target}"
;;
"s3" | "minio" )
@@ -581,17 +604,17 @@ move_dbbackup() {

[[ ( -n "${S3_HOST}" ) ]] && PARAM_AWS_ENDPOINT_URL=" --endpoint-url ${S3_PROTOCOL}://${S3_HOST}"

- aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
- unset s3_ssl
- unset s3_ca_cert
- if var_true "${ENABLE_CHECKSUM}" ;then
- rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
+ silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/${target} s3://${S3_BUCKET}/${S3_PATH}/${target} ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
+ if var_true "${ENABLE_CHECKSUM}" ; then
+ silent aws ${PARAM_AWS_ENDPOINT_URL} s3 cp ${TEMP_LOCATION}/*.${checksum_extension} s3://${S3_BUCKET}/${S3_PATH}/ ${s3_ssl} ${s3_ca_cert} ${S3_EXTRA_OPTS}
fi
+
+ rm -rf "${TEMP_LOCATION}"/*."${checksum_extension}"
rm -rf "${TEMP_LOCATION}"/"${target}"
;;
esac
else
- print_warn "Skipping moving DB Backup to final location because backup did not complete successfully"
+ print_error "Skipping moving DB Backup to final location because backup did not complete successfully"
fi

rm -rf "${TEMP_LOCATION}"/*
@@ -611,10 +634,11 @@ post_dbbackup() {

if var_true "${CONTAINER_ENABLE_MONITORING}" ; then
print_notice "Sending Backup Statistics to Zabbix"
- silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "$(stat -c%s "${DB_DUMP_TARGET}"/"${target}") "
- silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "$(date -r "${DB_DUMP_TARGET}"/"${target}" +'%s') "
+ silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.size -o "${dbbackup_size} "
+ silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.datetime -o "${dbbackup_date} "
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.status -o "${exit_code}"
silent zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k dbbackup.backup_duration -o "$(echo $((dbbackup_finish_time-dbbackup_start_time)))"
+ if [ "$?" != "0" ] ; then print_error "Error sending statistics, consider disabling with 'CONTAINER_ENABLE_MONITORING=FALSE'" ; fi
fi

### Post Script Support
@@ -641,6 +665,8 @@ post_dbbackup() {
fi

print_notice "DB Backup for '${1}' time taken: $(echo ${dbbackup_total_time} | awk '{printf "Hours: %d Minutes: %02d Seconds: %02d", $1/3600, ($1/60)%60, $1%60}')"
+ unset s3_ssl
+ unset s3_ca_cert
}

sanity_test() {