#!/bin/bash
set -e -u -o pipefail
# dumps all the databases in compressed (custom) format
# EXCLUDE: space separated list of database names to be skipped
# KEEP: how many files to keep, eg 3 means, keep 3 days + todays backup
# the file format includes the database name and owner:
# <database name>.<owner>.<database type>-<version>_<host>_<port>_<date in YYYYMMDD>_<time in HHMM>_<sequence in two digits with leading 0>.sql
# METHOD: usage
# PARAMS: none
# RETURN: none (prints help text to stdout)
# DESC  : prints the command line usage help
function usage
{
    cat <<EOT
Usage: ${0##*/} [-t] [-s] [-g] [-c] [-r] [-k <number to keep>] [-n] [-b <path>] [-i <postgreSQL version>] [-d <dump database name> [-d ...]] [-e <exclude dump> [-e ...]] [-u <db user>] [-h <db host>] [-p <db port>] [-l <db password>] [-L <log path>]

-t: test usage, no real backup is done
-s: turn ON ssl mode, default mode is off
-g: turn OFF dumping globals data, default is dumping globals data
-c: run clean up of old files before data is dumped. Default is run after data dump.
-k <number>: keep how many backups, default is 3 days/files
-n: keep files in numbers not in days
-b <path>: backup target path, mandatory: the script aborts if the path is not set or does not exist
-i <version>: override automatically set database version
-d <name>: database name to dump, option can be given multiple times, if not set all databases are dumped
-e <name>: exclude database from dump, option can be given multiple times, if not set none are excluded
-u <db user>: default is 'postgres'
-h <db host>: default is none
-p <db port>: default port is '5432'
-l <db password>: default password is empty
-r: use redhat base paths instead of debian
-L <log path>: where to put the dump log file, if not set tries to use PostgreSQL log folder
EOT
}
TEST=0; # if set to 1 will run script without doing anything
SSLMODE='disable';
GLOBALS=1; # if set to 0 will not dump globals
# in days, keeps KEEP+1 files, today + KEEP days before
# or keep number of files if CLEAN_NUMBER is true, then it can't be 0
KEEP=3;
CLEAN_NUMBER=0;
BACKUPDIR='';
DB_VERSION='';
DB_USER='';
DB_PASSWD='';
DB_HOST='';
DB_PORT='';
EXCLUDE=''; # space separated list of database names
INCLUDE=''; # space separated list of database names
BC='/usr/bin/bc';
PRE_RUN_CLEAN_UP=0;
SET_IDENT=0;
PORT_REGEX="^[0-9]{4,5}$";
OPTARG_REGEX="^-";
# log path
LOG_PATH='';
# base path for PostgreSQL binary
# DBPATH_BASE='';
# defaults
_BACKUPDIR='';
# auto-detect the installed pg_dump version:
# version >= 10 uses the major number only, older versions use major.minor
_DB_VERSION=$(
    pgv=$(
        pg_dump --version | grep "pg_dump" | cut -d " " -f 3
    );
    if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then
        echo "${pgv}" | cut -d "." -f 1;
    else
        echo "${pgv}" | cut -d "." -f 1,2;
    fi
);
_DB_USER='postgres';
_DB_PASSWD='';
_DB_HOST='';
_DB_PORT=5432;
_EXCLUDE=''; # space separated list of database names
_INCLUDE=''; # space separated list of database names
REDHAT=0;
# CONN_DB_HOST='';
ERROR=0;
# set options
while getopts ":ctsgnk:b:i:d:e:u:h:p:l:L:rm" opt; do
    # pre test for unfilled option arguments:
    # opt is ":" when a required argument is missing; OPTARG matching "^-"
    # means the next option flag was swallowed as this option's argument
    if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
        if [ "${opt}" = ":" ]; then
            CHECK_OPT=${OPTARG};
        else
            CHECK_OPT=${opt};
        fi;
        case ${CHECK_OPT} in
            k)
                echo "-k needs a number";
                ERROR=1;
                ;;
            b)
                echo "-b needs a path";
                ERROR=1;
                ;;
            i)
                echo "-i needs an ident";
                ERROR=1;
                ;;
            u)
                echo "-u needs a user name";
                ERROR=1;
                ;;
            h)
                echo "-h needs a host name";
                ERROR=1;
                ;;
            p)
                echo "-p needs a port number";
                ERROR=1;
                ;;
            l)
                echo "-l needs a login password";
                ERROR=1;
                ;;
            d)
                echo "-d needs a database name";
                ERROR=1;
                ;;
            e)
                echo "-e needs a database name";
                ERROR=1;
                ;;
            L)
                echo "-L needs a log path";
                ERROR=1;
                ;;
        esac
    fi;
    # set options
    case ${opt} in
        t)
            TEST=1;
            ;;
        g)
            GLOBALS=0;
            ;;
        c)
            PRE_RUN_CLEAN_UP=1;
            ;;
        s)
            # NOTE(review): 'enable' is not a standard libpq sslmode value
            # (disable/allow/prefer/require/...) — confirm intended value
            SSLMODE=enable;
            ;;
        k)
            KEEP=${OPTARG};
            ;;
        n)
            CLEAN_NUMBER=1;
            ;;
        b)
            if [ -z "${BACKUPDIR}" ]; then
                BACKUPDIR="${OPTARG}";
            fi;
            ;;
        i)
            if [ -z "${DB_VERSION}" ]; then
                DB_VERSION=${OPTARG};
                SET_IDENT=1;
            fi;
            ;;
        u)
            if [ -z "${DB_USER}" ]; then
                DB_USER=${OPTARG};
            fi;
            ;;
        h)
            if [ -z "${DB_HOST}" ]; then
                DB_HOST=${OPTARG};
            fi;
            ;;
        p)
            if [ -z "${DB_PORT}" ]; then
                DB_PORT=${OPTARG};
            fi;
            ;;
        l)
            if [ -z "${DB_PASSWD}" ]; then
                DB_PASSWD=${OPTARG};
            fi;
            ;;
        d)
            # append with a space separator when the list is already filled
            if [ -n "${INCLUDE}" ]; then
                INCLUDE=${INCLUDE}" ";
            fi;
            INCLUDE=${INCLUDE}${OPTARG};
            ;;
        e)
            # append with a space separator when the list is already filled
            if [ -n "${EXCLUDE}" ]; then
                EXCLUDE=${EXCLUDE}" ";
            fi;
            EXCLUDE=${EXCLUDE}${OPTARG};
            ;;
        r)
            REDHAT=1;
            ;;
        L)
            if [ -z "${LOG_PATH}" ]; then
                LOG_PATH="${OPTARG}";
            fi;
            ;;
        m)
            usage;
            exit 0;
            ;;
        :)
            echo "Option -$OPTARG requires an argument.";
            ;;
        \?)
            echo -e "\nOption does not exist: ${OPTARG}\n";
            usage;
            exit 1;
            ;;
    esac;
done;
# NOTE(review): the script exits 0 on error paths throughout — looks
# deliberate (avoid cron failure mails); confirm before changing
if [ ${ERROR} -eq 1 ]; then
    exit 0;
fi;
# if we have numeric keep and keep number is set to 0 abort
if [ "${CLEAN_NUMBER}" -eq 1 ] && [ "${KEEP}" -lt 1 ]; then
    echo "If keep in numbers is on, keep must be at least 1 or higher";
    exit 0;
fi;
# set the defaults: for every empty user-settable variable copy the value
# from its "_"-prefixed default counterpart (e.g. DB_USER <- _DB_USER)
for name in BACKUPDIR DB_VERSION DB_USER DB_PASSWD DB_HOST DB_PORT EXCLUDE INCLUDE; do
    # assign it to the real name if the real name is empty
    if [ -z "${!name}" ]; then
        # add the _ for the default name
        default="_"${name};
        # quoted so defaults containing spaces are assigned verbatim
        declare "${name}=${!default}"
    fi;
done;
# check base paths depending on installation and set log file
if [ "${REDHAT}" -eq 1 ]; then
    # Redhat base path (for non official ones would be '/usr/pgsql-'
    # This is also for Amazon NEWER (after 9.6)
    PG_BASE_PATH="/usr/pgsql-";
    # I assume that as default
    if [ -z "${LOG_PATH}" ]; then
        LOG_PATH="/var/lib/pgsql/${DB_VERSION}/data/log/";
    fi;
else
    # Debian base path
    PG_BASE_PATH="/usr/lib/postgresql/";
    if [ -z "${LOG_PATH}" ]; then
        LOG_PATH="/var/log/postgresql/";
    fi;
fi;
# setup log before everything else
LOG="${LOG_PATH}pg_db_dump_file.log";
# if we cannot write to the log file abort
if [[ -f "${LOG}" && ! -w "${LOG}" ]] || [[ ! -f "${LOG}" && ! -w "${LOG_PATH}" ]]; then
    echo "Cannot write to ${LOG} or create a new log file in ${LOG_PATH}";
    # NOTE(review): bare exit -> status 0, consistent with other abort paths
    exit;
fi;
# log to file and also write to stdout
exec &> >(tee -a "${LOG}");
# check DB port is valid number
if ! [[ "${DB_PORT}" =~ ${PORT_REGEX} ]]; then
    echo "The port needs to be a valid number: ${DB_PORT}";
    exit 0;
fi;
# check if we have the 'bc' command available or not
if [ -f "${BC}" ]; then
    BC_OK=1;
else
    BC_OK=0;
fi;
# the default pg params
PG_PARAMS=("-U" "${DB_USER}" "-p" "${DB_PORT}")
# if DB_HOST is set, we need to add -h to the command line
# if nothing is set, DB_HOST is set to local so we know this is a "port" connection for later automatic restore
if [ -z "${DB_HOST}" ]; then
    DB_HOST='local';
else
    # CONN_DB_HOST='-h '${DB_HOST};
    PG_PARAMS+=("-h" "${DB_HOST}");
fi;
# copy to select: tuples-only, unaligned, comma separated, no psqlrc, quiet
PG_PARAMS_SELECT=("${PG_PARAMS[@]}");
PG_PARAMS_SELECT+=("-d" "template1" "-t" "-A" "-F" "," "-X" "-q" "-c");
# set the binaries we need
PG_PATH=${PG_BASE_PATH}${DB_VERSION}'/bin/';
PG_PSQL=${PG_PATH}'psql';
PG_DUMP=${PG_PATH}'pg_dump';
PG_DUMPALL=${PG_PATH}'pg_dumpall';
DB_TYPE='pgsql';
db='';
# core abort if no core files found
if [ ! -f "${PG_PSQL}" ] || [ ! -f "${PG_DUMP}" ] || [ ! -f "${PG_DUMPALL}" ]; then
    echo "One of the core binaries (psql, pg_dump, pg_dumpall) could not be found.";
    echo "Search Path: ${PG_PATH}";
    echo "Perhaps manual ident set with -i is necessary";
    echo "Backup aborted";
    exit 0;
fi;
# not directory or length is zero
if [ ! -d "${BACKUPDIR}" ] || [ -z "${BACKUPDIR}" ]; then
    echo "Backup directory does not exist: ${BACKUPDIR}";
    exit 0;
fi
# check if we can write into that folder
touch "${BACKUPDIR}/tmpfile" || echo "[!] touch failed";
if [ ! -f "${BACKUPDIR}/tmpfile" ]; then
    echo "Cannot write to ${BACKUPDIR}";
    exit 0;
else
    rm "${BACKUPDIR}/tmpfile";
fi;
# if backupdir is "." rewrite to pwd
if [ "${BACKUPDIR}" == '.' ]; then
    BACKUPDIR=$(pwd);
fi;
# check if we can connect to template1 table, if not we abort here
_PG_PARAMS_SELECT=("${PG_PARAMS_SELECT[@]}");
_PG_PARAMS_SELECT+=("SELECT datname FROM pg_catalog.pg_database WHERE datname = 'template1';");
connect=$(${PG_PSQL} "${_PG_PARAMS_SELECT[@]}") || echo "[!] pgsql connect error";
if [ "${connect}" != "template1" ]; then
    echo "Failed to connect to template1 with user '${DB_USER}' at host '${DB_HOST}' on port '${DB_PORT}'";
    exit 0;
fi;
# if we have an ident override set, set a different DUMP VERSION here than the automatic one
if [ "${SET_IDENT}" -eq 1 ]; then
    DUMP_DB_VERSION=$(
        pgv=$(
            # PG_PATH already ends in '/', no extra slash needed
            "${PG_PATH}pg_dump" --version | grep "pg_dump" | cut -d " " -f 3
        );
        if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then
            echo "${pgv}" | cut -d "." -f 1;
        else
            echo "${pgv}" | cut -d "." -f 1,2;
        fi
    );
else
    DUMP_DB_VERSION=${DB_VERSION};
fi;
# turn off ssl
# comment line out, if SSL connection is wanted
export PGSSLMODE=${SSLMODE};
# METHOD: convert_time
# PARAMS: timestamp in seconds or with milliseconds (nnnn.nnnn)
# RETURN: formatted string with human readable time (d/h/m/s)
# CALL  : var=$(convert_time $timestamp);
# DESC  : converts a timestamp or a timestamp with float milliseconds to a
#         human readable format; output is in days/hours/minutes/seconds
#         uses bc when available (BC_OK=1), awk otherwise
function convert_time
{
    timestamp=${1};
    # round to four digits for ms
    timestamp=$(printf "%1.4f" "$timestamp");
    # get the ms part and remove any leading 0
    ms=$(echo "${timestamp}" | cut -d "." -f 2 | sed -e 's/^0*//');
    timestamp=$(echo "${timestamp}" | cut -d "." -f 1);
    timegroups=(86400 3600 60 1); # day, hour, min, sec
    timenames=("d" "h" "m" "s"); # day, hour, min, sec
    output=();
    time_string='';
    for timeslice in "${timegroups[@]}"; do
        # floor for the division, push to output; keep the remainder
        if [ ${BC_OK} -eq 1 ]; then
            output[${#output[*]}]=$(echo "${timestamp}/${timeslice}" | bc);
            timestamp=$(echo "${timestamp}%${timeslice}" | bc);
        else
            output[${#output[*]}]=$(awk "BEGIN {printf \"%d\", ${timestamp}/${timeslice}}");
            timestamp=$(awk "BEGIN {printf \"%d\", ${timestamp}%${timeslice}}");
        fi;
    done;
    # build the output string, skip leading all-zero groups
    for ((i = 0; i < ${#output[@]}; i++)); do
        if [ "${output[$i]}" -gt 0 ] || [ -n "${time_string}" ]; then
            if [ -n "${time_string}" ]; then
                time_string=${time_string}" ";
            fi;
            time_string=${time_string}${output[$i]}${timenames[$i]};
        fi;
    done;
    # milliseconds must be filled, but we also check that they are non "nan" string
    # that can appear in the original value
    if [ -n "${ms}" ] && [ "${ms}" != "nan" ]; then
        if [ "${ms}" -gt 0 ]; then
            time_string="${time_string} ${ms}ms";
        fi;
    fi;
    # just in case the time is 0
    if [ -z "${time_string}" ]; then
        time_string="0s";
    fi;
    echo -n "${time_string}";
}
# METHOD: convert_bytes
# PARAMS: size in bytes
# RETURN: human readable byte data in TB/GB/MB/KB/etc
# CALL  : size=$(convert_bytes $bytes);
# DESC  : converts bytes into human readable format with 2 decimals;
#         plain bytes are printed as integer (e.g. "512B"), scaled values
#         with two decimals (e.g. "1.50KB")
function convert_bytes
{
    bytes=${1};
    # use awk to calc it: walk the unit table while the value is >= 1024
    echo -n "$(echo "${bytes}" | awk 'function human(x) {
        split("B KB MB GB TB PB EB ZB YB", unit, " ");
        u = 1;
        while (x >= 1024 && u < 9) {
            x /= 1024;
            u++;
        }
        return sprintf((u == 1) ? "%d%s" : "%.2f%s", x, unit[u]);
    }
    { print human($1); }')";
}
# METHOD: get_dump_file_name
# PARAMS: none (reads globals: db, owner, encoding, DB_USER, BACKUPDIR,
#         DB_TYPE, DUMP_DB_VERSION, DB_HOST, DB_PORT)
# RETURN: dump file name (echo)
# CALL  : var=$(get_dump_file_name);
# DESC  : builds the next dump file name for the current database (or the
#         globals file when db is empty) and bumps the per-minute sequence
#         number if a file for this minute already exists
function get_dump_file_name
{
    # set base search for the files
    if [ -n "${db}" ]; then
        db_name="${db}.${owner}.${encoding}.";
    else
        db_name="pg_globals.${DB_USER}.NONE.";
    fi;
    seq='';
    # we need to find the next sequence number
    for i in "${BACKUPDIR}/${db_name}${DB_TYPE}-${DUMP_DB_VERSION}_${DB_HOST}_${DB_PORT}_$(date +%Y%m%d)_$(date +%H%M)"*.c.sql; do
        # skip the literal pattern when the glob matched nothing
        [ -e "${i}" ] || continue;
        # get the last sequence and cut any leading 0 so we can run +1 on it
        seq=$(echo "$i" | cut -d "." -f 4 | cut -d "_" -f 6 | sed -e "s/^0//g");
    done;
    if [ -n "${seq}" ]; then
        # add +1 and if < 10 prefix with 0
        seq=$((seq+1));
        if [ ${seq} -lt 10 ]; then
            sequence="0"${seq};
        else
            sequence=${seq};
        fi;
    else
        sequence="01";
    fi;
    # now build correct file name
    filename="${BACKUPDIR}/${db_name}${DB_TYPE}-${DUMP_DB_VERSION}_${DB_HOST}_${DB_PORT}_$(date +%Y%m%d)_$(date +%H%M)_${sequence}.c.sql";
    echo "${filename}";
}
# METHOD: get_dump_databases
# PARAMS: none (reads globals: GLOBALS, PG_PSQL, PG_PARAMS_SELECT,
#         EXCLUDE, INCLUDE)
# RETURN: none (fills the global search_names array)
# CALL  : get_dump_databases
# DESC  : this is needed only for the clean up run if the clean up is run
#         before the actual database dump; fills up the global search
#         names array with "<db>.*" patterns honoring include/exclude lists
function get_dump_databases
{
    search_names=();
    if [ ${GLOBALS} -eq 1 ]; then
        search_names+=("pg_globals.*");
    fi;
    _PG_PARAMS_SELECT=("${PG_PARAMS_SELECT[@]}");
    _PG_PARAMS_SELECT+=("SELECT pg_catalog.pg_get_userbyid(datdba) AS owner, datname, pg_catalog.pg_encoding_to_char(encoding) FROM pg_catalog.pg_database WHERE datname !~ 'template(0|1)';");
    for owner_db in $(${PG_PSQL} "${_PG_PARAMS_SELECT[@]}"); do
        db=$(echo "${owner_db}" | cut -d "," -f 2);
        # check if we exclude this db
        exclude=0;
        include=0;
        for excl_db in ${EXCLUDE}; do
            if [ "${db}" = "${excl_db}" ]; then
                exclude=1;
                break;
            fi;
        done;
        # empty include list means: include everything
        if [ -n "${INCLUDE}" ]; then
            for incl_db in ${INCLUDE}; do
                if [ "${db}" = "${incl_db}" ]; then
                    include=1;
                    break;
                fi;
            done;
        else
            include=1;
        fi;
        if [ ${exclude} -eq 0 ] && [ ${include} -eq 1 ]; then
            search_names+=("${db}.*");
        fi;
    done;
}
# METHOD: clean_up
# PARAMS: none (reads globals: BACKUPDIR, CLEAN_NUMBER, KEEP, TEST,
#         PRE_RUN_CLEAN_UP, DB_TYPE, search_names)
# RETURN: none
# CALL  : clean_up;
# DESC  : checks for older files than given keep time/amount and removes
#         them; day-based (CLEAN_NUMBER=0) uses a single find -mtime run,
#         number-based keeps only the newest KEEP files per search name
function clean_up
{
    if [ -d "${BACKUPDIR}" ]; then
        if [ ${CLEAN_NUMBER} -eq 0 ]; then
            echo "Cleanup older than ${KEEP} days backup in ${BACKUPDIR}";
        else
            echo "Cleanup up, keep only ${KEEP} backups in ${BACKUPDIR}";
            # if we run clean before backup, we need to clean up +1
            # so if we keep one, we must remove all data before running new
            if [ ${PRE_RUN_CLEAN_UP} -eq 1 ]; then
                KEEP=$((KEEP-1));
            fi;
        fi;
        # build the find string based on the search names pattern
        find_params=("${BACKUPDIR}");
        for name in "${search_names[@]}"; do
            # for not number based, we build the find string here
            # else we do the delete here already
            if [ ${CLEAN_NUMBER} -eq 0 ]; then
                if [ ${#find_params[@]} -gt 1 ]; then
                    find_params+=("-o");
                fi;
                find_params+=("-mtime" "+${KEEP}" "-name" "${name}${DB_TYPE}*.sql" "-type" "f" "-delete" "-print");
                echo "- Remove old backups for '${name}'";
            else
                # if we do number based delete of old data, but only if the number of
                # files is bigger than the keep number or equal if we do PRE_RUN_CLEAN_UP
                # this can be error, but we allow it -> script should not abort here
                # note we have a wildcard in the name, so we can't put that into ""
                count=$(find "${BACKUPDIR}" -name "${name}${DB_TYPE}*.sql" -type f -print | wc -l);
                if [ ${PRE_RUN_CLEAN_UP} -eq 1 ]; then
                    count=$((count+1));
                fi;
                if [ "${count}" -gt "${KEEP}" ]; then
                    # calculate the amount to delete
                    # eg if we want to keep 1, and we have 3 files then we need to delete 2
                    # keep is always +1 (include the to backup count).
                    # count is +1 if we do a pre-run cleanup
                    # grouped by db name, db type
                    TO_DELETE=$((count-KEEP));
                    echo "- Remove old backups for '${name}', found ${count}, will delete ${TO_DELETE}";
                    if [ ${TEST} -eq 0 ]; then
                        # sort by mtime ascending (oldest first), strip the
                        # timestamp column before handing the paths to rm
                        find "${BACKUPDIR}" -name "${name}${DB_TYPE}*.sql" -type f -printf "%Ts\t%p\n" | \
                            sort -n | \
                            head -n ${TO_DELETE} | \
                            cut -f 2- | \
                            xargs rm;
                    else
                        echo "find \"${BACKUPDIR}\" -name \"${name}${DB_TYPE}*.sql\" -type f -printf \"%Ts\\t%p\\n\" | sort -n | head -n ${TO_DELETE} | cut -f 2- | xargs rm";
                    fi;
                fi;
            fi;
        done;
        # if we do find (day based) delete of old data
        if [ ${CLEAN_NUMBER} -eq 0 ]; then
            if [ ${TEST} -eq 0 ]; then
                find "${find_params[@]}";
            else
                echo "find ${find_params[*]}";
            fi;
        fi;
    fi
}
# main run: optional pre-clean, dump globals, dump per-database, post-clean
if [ -n "${DB_PASSWD}" ]; then
    export PGPASSWORD=${DB_PASSWD};
fi;
START=$(date "+%s");
echo "=======================================================================>";
printf "Starting at %s\n" "$(date '+%Y-%m-%d %H:%M:%S')";
echo "Target dump directory is: ${BACKUPDIR}";
echo "Keep ${KEEP} backups";
# if flag is set, do pre run clean up
if [ ${PRE_RUN_CLEAN_UP} -eq 1 ]; then
    get_dump_databases;
    clean_up;
fi;
echo "Backing up databases:";
# reset search name list for actual dump
search_names=();
# dump globals
if [ ${GLOBALS} -eq 1 ]; then
    echo -e -n "+ Dumping globals ... "
    # reset any previous set db name from deletes so the correct global file name is set
    db='';
    filename=$(get_dump_file_name);
    # this is used for the find/delete part
    search_names+=("pg_globals.*");
    # build dump parms
    _PG_PARAMS_DUMP=("${PG_PARAMS[@]}");
    _PG_PARAMS_DUMP+=("--globals-only")
    if [ ${TEST} -eq 0 ]; then
        ${PG_DUMPALL} "${_PG_PARAMS_DUMP[@]}" > "${filename}";
    else
        echo "${PG_DUMPALL} ${_PG_PARAMS_DUMP[*]} > ${filename}";
    fi;
    echo "done";
else
    echo "- Skip dumping globals";
fi;
echo -n "(+) Dump databases: ";
if [ -z "${INCLUDE}" ]; then
    echo "All";
else
    echo "${INCLUDE}";
fi;
echo -n "(-) Exclude databases: ";
if [ -z "${EXCLUDE}" ]; then
    echo "None";
else
    echo "${EXCLUDE}";
fi;
filesize_sum=0;
_PG_PARAMS_SELECT=("${PG_PARAMS_SELECT[@]}");
_PG_PARAMS_SELECT+=("SELECT pg_catalog.pg_get_userbyid(datdba) AS owner, datname, pg_catalog.pg_encoding_to_char(encoding) AS encoding FROM pg_catalog.pg_database WHERE datname !~ 'template(0|1)' ORDER BY datname;");
for owner_db in $(${PG_PSQL} "${_PG_PARAMS_SELECT[@]}"); do
    # get the user who owns the DB too
    owner=$(echo "${owner_db}" | cut -d "," -f 1);
    db=$(echo "${owner_db}" | cut -d "," -f 2);
    encoding=$(echo "${owner_db}" | cut -d "," -f 3);
    # check if we exclude this db
    exclude=0;
    include=0;
    for excl_db in ${EXCLUDE}; do
        if [ "${db}" = "${excl_db}" ]; then
            exclude=1;
            break;
        fi;
    done;
    # empty include list means: include everything
    if [ -n "${INCLUDE}" ]; then
        for incl_db in ${INCLUDE}; do
            if [ "${db}" = "${incl_db}" ]; then
                include=1;
                break;
            fi;
        done;
    else
        include=1;
    fi;
    if [ ${exclude} -eq 0 ] && [ ${include} -eq 1 ]; then
        printf "+ Dumping database: %35s ... " "${db}";
        filename=$(get_dump_file_name);
        search_names+=("${db}.*");
        SUBSTART=$(date "+%s");
        # build dump parms
        _PG_PARAMS_DUMP=("${PG_PARAMS[@]}");
        _PG_PARAMS_DUMP+=("-c" "--format=c" "${db}")
        if [ ${TEST} -eq 0 ]; then
            ${PG_DUMP} "${_PG_PARAMS_DUMP[@]}" > "${filename}";
        else
            echo "${PG_DUMP} ${_PG_PARAMS_DUMP[*]} > ${filename}";
        fi;
        # get the file size for the dumped file and convert it to a human readable format
        filesize=0;
        if [ -f "${filename}" ]; then
            filesize=$(wc -c "${filename}" | cut -f 1 -d ' ');
            filesize_sum=$((filesize+filesize_sum));
        fi;
        DURATION=$(($(date "+%s") - SUBSTART));
        printf "done (%s and %s)\n" "$(convert_time ${DURATION})" "$(convert_bytes "${filesize}")";
    else
        printf -- "- Exclude database: %35s\n" "${db}";
    fi;
done
printf "Backup ended at %s\n" "$(date '+%Y-%m-%d %H:%M:%S')";
if [ -n "${DB_PASSWD}" ]; then
    unset DB_PASSWD;
fi;
if [ ${PRE_RUN_CLEAN_UP} -eq 0 ]; then
    clean_up;
fi;
DURATION=$(($(date "+%s") - START));
printf "Cleanup ended at %s\n" "$(date '+%Y-%m-%d %H:%M:%S')";
printf "Finished backup in %s with %s\n" "$(convert_time ${DURATION})" "$(convert_bytes ${filesize_sum})";
## END