Clean up all scripts to match bash linting rules
- pg_db_dump_file: the log path parameter never set the log folder, it was always overwritten
- pg_drop_restore: a double ";;" inside the script stopped it from working
- All: remove the -a flag for Amazon Linux. It was only needed for the V1 type, which we no longer support (or use), and Amazon Linux 2 is closer to a Red Hat layout, so the -r flag works there
- Various other formatting updates
- Replace the odd ls constructs with find calls
- Pass parameters to the called commands as arrays instead of building them into strings (see the sketch below)
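The last point is the largest mechanical change: instead of concatenating host, port and role options into strings that are then expanded unquoted, the scripts now collect them in bash arrays and expand them with "${ARRAY[@]}". A minimal, hedged sketch of the pattern; the variable names and values are illustrative, not the exact ones used in every script:

#!/bin/bash
# Sketch of the "parameters in an array" pattern used throughout this commit.
# The values below are made-up examples.
_host="db.example.com";
_port=5432;
database="somedb";
PG_PSQL="psql";

PG_PARAMS=("-U" "postgres");
# only add -h when not connecting over the local socket
if [ "${_host}" != "local" ]; then
	PG_PARAMS+=("-h" "${_host}");
fi;
PG_PARAMS+=("-p" "${_port}" "${database}");
# every element stays one quoted argument, so no word splitting or globbing
echo "would run: ${PG_PSQL} ${PG_PARAMS[*]}";
# "${PG_PSQL}" "${PG_PARAMS[@]}";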
@@ -11,7 +11,7 @@ set -e -u -o pipefail
 function usage ()
 {
 cat <<- EOT
-Usage: ${0##/*/} [-t] [-s] [-g] [-c] [-r|-a] [-k <number to keep>] [-n] [-b <path>] [-i <postgreSQL version>] [-d <dump database name> [-d ...]] [-e <exclude dump> [-e ...]] [-u <db user>] [-h <db host>] [-p <db port>] [-l <db password>] [-L <log path>]
+Usage: ${0##/*/} [-t] [-s] [-g] [-c] [-r] [-k <number to keep>] [-n] [-b <path>] [-i <postgreSQL version>] [-d <dump database name> [-d ...]] [-e <exclude dump> [-e ...]] [-u <db user>] [-h <db host>] [-p <db port>] [-l <db password>] [-L <log path>]
 
 -t: test usage, no real backup is done
 -s: turn ON ssl mode, default mode is off
@@ -28,7 +28,6 @@ function usage ()
 -p <db port>: default port is '5432'
 -l <db password>: default password is empty
 -r: use redhat base paths instead of debian
--a: use amazon base paths instead of debian
 -L <log path>: where to put the dump log file, if not set tries to use PostgreSQL log folder
 EOT
 }
@@ -67,12 +66,11 @@ _DB_PORT=5432;
 _EXCLUDE=''; # space separated list of database names
 _INCLUDE=''; # space seperated list of database names
 REDHAT=0;
-AMAZON=0;
 # CONN_DB_HOST='';
 ERROR=0;
 
 # set options
-while getopts ":ctsgnk:b:i:d:e:u:h:p:l:L:ram" opt; do
+while getopts ":ctsgnk:b:i:d:e:u:h:p:l:L:rm" opt; do
 # pre test for unfilled
 if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
 if [ "${opt}" = ":" ]; then
@@ -204,10 +202,6 @@ while getopts ":ctsgnk:b:i:d:e:u:h:p:l:L:ram" opt; do
 r)
 REDHAT=1;
 ;;
-# a|amazon)
-a)
-AMAZON=1;
-;;
 # L|logpath)
 L)
 if [ -z "${LOG_PATH}" ]; then
@@ -234,11 +228,6 @@ if [ ${ERROR} -eq 1 ]; then
 exit 0;
 fi;
 
-if [ "${REDHAT}" -eq 1 ] && [ "${AMAZON}" -eq 1 ]; then
-echo "You cannot set the -a and -r flag at the same time";
-exit 0;
-fi;
-
 # if we have numeric keep and keep number is set to 0 abort
 if [ "${CLEAN_NUMBER}" -eq 1 ] && [ "${KEEP}" -lt 1 ]; then
 echo "If keep in numbers is on, keep must be at least 1 or higher";
@@ -264,13 +253,6 @@ if [ "${REDHAT}" -eq 1 ]; then
 if [ -z "${LOG_PATH}" ]; then
 LOG_PATH="/var/lib/pgsql/${DB_VERSION}/data/log/";
 fi;
-elif [ "${AMAZON}" -eq 1 ]; then
-# only older 9.6 or before
-PG_BASE_PATH="/usr/lib64/pgsql";
-# LOG PATH, will be attached to DB VERSION
-if [ -z "${LOG_PATH}" ]; then
-LOG_PATH="/var/lib/pgsql${DB_VERSION}/data/pg_log";
-fi;
 else
 # Debian base path
 PG_BASE_PATH="/usr/lib/postgresql/";
@@ -362,7 +344,18 @@ fi;
 
 # if we have an ident override set, set a different DUMP VERSION here than the automatic one
 if [ "${SET_IDENT}" -eq 1 ]; then
-DUMP_DB_VERSION=$(pgv=$("${PG_PATH}/pg_dump" --version | grep "pg_dump" | cut -d " " -f 3); if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then echo "${pgv}" | cut -d "." -f 1; else echo "${pgv}" | cut -d "." -f 1,2; fi );
+DUMP_DB_VERSION=$(
+pgv=$(
+"${PG_PATH}/pg_dump" --version | \
+grep "pg_dump" | \
+cut -d " " -f 3
+);
+if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then
+echo "${pgv}" | cut -d "." -f 1;
+else
+echo "${pgv}" | cut -d "." -f 1,2;
+fi
+);
 else
 DUMP_DB_VERSION=${DB_VERSION};
 fi;
@@ -568,7 +561,10 @@ function clean_up
 TO_DELETE=$((count-KEEP));
 echo "- Remove old backups for '${name}', found ${count}, will delete ${TO_DELETE}";
 if [ ${TEST} -eq 0 ]; then
-find "${BACKUPDIR}" -name "${name}${DB_TYPE}*.sql" -type f -printf "%Ts\t%p\n" | sort -nr | head -n ${TO_DELETE} | xargs rm;
+find "${BACKUPDIR}" -name "${name}${DB_TYPE}*.sql" -type f -printf "%Ts\t%p\n" | \
+sort -n | \
+head -n ${TO_DELETE} | \
+xargs rm;
 else
 print "find \"${BACKUPDIR}\" -name \"${name}${DB_TYPE}*.sql\" -type f -printf \"%Ts\\t%p\\n\" | sort -nr | head -n ${TO_DELETE} | xargs rm";
 fi;
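The "strange ls" calls mentioned in the commit message are replaced with find pipelines shaped like the clean_up hunk above: print the modification time next to each path, sort numerically, then keep or delete what is needed. A generic, hedged sketch of the "newest matching file" variant; the folder and pattern are made up, and the trailing cut is only one way to strip the timestamp column:

#!/bin/bash
# Sketch only: pick the newest globals dump without parsing ls output.
DUMP_FOLDER="/var/backups/pg";   # example path
file=$(find "${DUMP_FOLDER}" -name "pg_global*" -type f -printf "%Ts\t%p\n" | \
	sort -nr | \
	head -n 1 | \
	cut -f 2-);
echo "newest globals dump: ${file}";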
@@ -2,14 +2,14 @@
 
 # Author: Clemens Schwaighofer
 # Description:
-# Drop and restore one database
+# Drop and restore one database from a dump created by created by pg_db_dump_file.sh
 
 function usage ()
 {
 cat <<- EOT
 Restores a single database dump to a database
 
-Usage: ${0##/*/} -o <DB OWNER> -d <DB NAME> -f <FILE NAME> [-h <DB HOST>] [-p <DB PORT>] [-e <ENCODING>] [-i <POSTGRES VERSION>] [-j <JOBS>] [-s] [-r|-a] [-n]
+Usage: ${0##/*/} -o <DB OWNER> -d <DB NAME> -f <FILE NAME> [-h <DB HOST>] [-p <DB PORT>] [-e <ENCODING>] [-i <POSTGRES VERSION>] [-j <JOBS>] [-s] [-r] [-n]
 
 -o <DB OWNER>: The user who will be owner of the database to be restored
 -d <DB NAME>: The database to restore the file to
@@ -21,7 +21,6 @@ function usage ()
 -j <JOBS>: Run how many jobs Parallel. If not set, 2 jobs are run parallel
 -s: Restore only schema, no data
 -r: use redhat base paths instead of debian
--a: use amazon base paths instead of debian
 -n: dry run, do not do anything, just test flow
 EOT
 }
@@ -29,20 +28,21 @@ function usage ()
 _port=5432
 _host='local';
 _encoding='UTF8';
-role='';
+# role='';
 schema='';
 NO_ASK=0;
 TEMPLATEDB='template0';
 SCHEMA_ONLY=0;
 REDHAT=0;
-AMAZON=0;
 DRY_RUN=0;
 BC='/usr/bin/bc';
 PORT_REGEX="^[0-9]{4,5}$";
 OPTARG_REGEX="^-";
 MAX_JOBS='';
+PG_PARAMS=();
+PG_PARAM_ROLE=();
 # if we have options, set them and then ignore anything below
-while getopts ":o:d:h:f:p:e:i:j:raqnms" opt; do
+while getopts ":o:d:h:f:p:e:i:j:rqnms" opt; do
 # pre test for unfilled
 if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
 if [ "${opt}" = ":" ]; then
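Both restore scripts keep the same silent-mode getopts guard seen in the hunk above: the optstring starts with ":", so a missing argument turns ${opt} into ":", and an argument that itself starts with "-" is almost always the next flag that got swallowed. A small self-contained sketch of that pre-test, with a trimmed option set and made-up messages:

#!/bin/bash
# Sketch of the silent-mode getopts pre-test (reduced option set).
OPTARG_REGEX="^-";
ERROR=0;
while getopts ":d:f:r" opt; do
	if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
		if [ "${opt}" = ":" ]; then
			echo "Option -${OPTARG} requires an argument";
		else
			echo "Option -${opt} got '${OPTARG}', which looks like another flag";
		fi;
		ERROR=1;
		continue;
	fi;
	case ${opt} in
		d) database=${OPTARG};;
		f) file=${OPTARG};;
		r) REDHAT=1;;
		\?) echo "Unknown option -${OPTARG}"; ERROR=1;;
	esac;
done;
echo "database=${database-} file=${file-} redhat=${REDHAT-0} error=${ERROR}";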
@@ -86,71 +86,80 @@ while getopts ":o:d:h:f:p:e:i:j:raqnms" opt; do
|
|||||||
esac
|
esac
|
||||||
fi;
|
fi;
|
||||||
case $opt in
|
case $opt in
|
||||||
o|owner)
|
# o|owner)
|
||||||
|
o)
|
||||||
if [ -z "$owner" ]; then
|
if [ -z "$owner" ]; then
|
||||||
owner=$OPTARG;
|
owner=$OPTARG;
|
||||||
# if not standard user we need to set restore role
|
# if not standard user we need to set restore role
|
||||||
# so tables/etc get set to new user
|
# so tables/etc get set to new user
|
||||||
role="--no-owner --role $owner";
|
# role="--no-owner --role $owner";
|
||||||
|
PG_PARAM_ROLE=("--no-owner" "--role" "$owner");
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
d|database)
|
# d|database)
|
||||||
|
d)
|
||||||
if [ -z "$database" ]; then
|
if [ -z "$database" ]; then
|
||||||
database=$OPTARG;
|
database=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
e|encoding)
|
# e|encoding)
|
||||||
|
e)
|
||||||
if [ -z "$encoding" ]; then
|
if [ -z "$encoding" ]; then
|
||||||
encoding=$OPTARG;
|
encoding=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
f|file)
|
# f|file)
|
||||||
|
f)
|
||||||
if [ -z "$file" ]; then
|
if [ -z "$file" ]; then
|
||||||
file=$OPTARG;
|
file=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
h|hostname)
|
# h|hostname)
|
||||||
|
h)
|
||||||
if [ -z "$_host" ]; then
|
if [ -z "$_host" ]; then
|
||||||
# if local it is socket
|
# if local it is socket
|
||||||
if [ "$OPTARG" != "local" ]; then
|
if [ "$OPTARG" != "local" ]; then
|
||||||
host='-h '$OPTARG;
|
PG_PARAMS+=("-h" "${OPTARG}");
|
||||||
else
|
|
||||||
host='';
|
|
||||||
fi;
|
fi;
|
||||||
_host=$OPTARG;
|
_host=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
p|port)
|
# p|port)
|
||||||
|
p)
|
||||||
if [ -z "$port" ]; then
|
if [ -z "$port" ]; then
|
||||||
port='-p '$OPTARG;
|
PG_PARAMS+=("-p" "${OPTARG}");
|
||||||
_port=$OPTARG;
|
_port=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
i|ident)
|
# i|ident)
|
||||||
|
i)
|
||||||
if [ -z "$ident" ]; then
|
if [ -z "$ident" ]; then
|
||||||
ident=$OPTARG;
|
ident=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
j|jobs)
|
# j|jobs)
|
||||||
|
j)
|
||||||
MAX_JOBS=${OPTARG};
|
MAX_JOBS=${OPTARG};
|
||||||
;;
|
;;
|
||||||
q|quiet)
|
# q|quiet)
|
||||||
|
q)
|
||||||
NO_ASK=1;
|
NO_ASK=1;
|
||||||
;;
|
;;
|
||||||
r|redhat)
|
# r|redhat)
|
||||||
|
r)
|
||||||
REDHAT=1;
|
REDHAT=1;
|
||||||
;;
|
;;
|
||||||
a|amazon)
|
# n|dry-run)
|
||||||
AMAZON=1;
|
n)
|
||||||
;;
|
|
||||||
n|dry-run)
|
|
||||||
DRY_RUN=1;
|
DRY_RUN=1;
|
||||||
;;
|
;;
|
||||||
s|schema-only)
|
# s|schema-only)
|
||||||
|
s)
|
||||||
SCHEMA_ONLY=1
|
SCHEMA_ONLY=1
|
||||||
schema='-s';
|
schema='-s';
|
||||||
;;
|
;;
|
||||||
m|help)
|
# m|help)
|
||||||
|
m)
|
||||||
usage;
|
usage;
|
||||||
exit 0;
|
exit 0;
|
||||||
;;
|
;;
|
||||||
@@ -162,10 +171,10 @@ while getopts ":o:d:h:f:p:e:i:j:raqnms" opt; do
 esac;
 done;
 
-if [ "$REDHAT" -eq 1 ] && [ "$AMAZON" -eq 1 ]; then
-echo "You cannot set the -a and -r flag at the same time";
-exit 1;
+if [ "${ERROR}" -eq 1 ]; then
+exit 0;
 fi;
 
 # check that the port is a valid number
 if ! [[ "$_port" =~ $PORT_REGEX ]]; then
 echo "The port needs to be a valid number: $_port";
@@ -174,16 +183,15 @@ fi;
 NUMBER_REGEX="^[0-9]{1,}$";
 # find the max allowed jobs based on the cpu count
 # because setting more than this is not recommended
-cpu=$(cat /proc/cpuinfo | grep processor | tail -n 1);
-_max_jobs=$[ ${cpu##*: }+1 ] # +1 because cpu count starts with 0
+_max_jobs=$(nproc --all);
 # if the MAX_JOBS is not number or smaller 1 or greate _max_jobs
-if [ ! -z "${MAX_JOBS}" ]; then
+if [ -n "${MAX_JOBS}" ]; then
 # check that it is a valid number
 if ! [[ "$MAX_JOBS" =~ $NUMBER_REGEX ]]; then
 echo "Please enter a number for the -j option";
 exit 1;
 fi;
-if [ "${MAX_JOBS}" -lt 1 ] || [ "${MAX_JOBS}" -gt ${_max_jobs} ]; then
+if [ "${MAX_JOBS}" -lt 1 ] || [ "${MAX_JOBS}" -gt "${_max_jobs}" ]; then
 echo "The value for the jobs option -j cannot be smaller than 1 or bigger than ${_max_jobs}";
 exit 1;
 fi;
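nproc --all now replaces the /proc/cpuinfo parsing wherever the -j job count is validated. A standalone sketch of that clamp, with an example request instead of a value taken from getopts:

#!/bin/bash
# Sketch: validate a requested parallel job count against the CPU count.
MAX_JOBS=16;                  # example request, normally taken from -j
_max_jobs=$(nproc --all);     # number of installed processors
NUMBER_REGEX="^[0-9]{1,}$";
if ! [[ "${MAX_JOBS}" =~ ${NUMBER_REGEX} ]]; then
	echo "Please enter a number for the -j option";
	exit 1;
fi;
if [ "${MAX_JOBS}" -lt 1 ] || [ "${MAX_JOBS}" -gt "${_max_jobs}" ]; then
	echo "Jobs must be between 1 and ${_max_jobs}";
	exit 1;
fi;
echo "running with ${MAX_JOBS} parallel job(s)";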
@@ -208,22 +216,21 @@ fi;
|
|||||||
# PARAMS: timestamp in seconds or with milliseconds (nnnn.nnnn)
|
# PARAMS: timestamp in seconds or with milliseconds (nnnn.nnnn)
|
||||||
# RETURN: formated string with human readable time (d/h/m/s)
|
# RETURN: formated string with human readable time (d/h/m/s)
|
||||||
# CALL : var=$(convert_time $timestamp);
|
# CALL : var=$(convert_time $timestamp);
|
||||||
# DESC : converts a timestamp or a timestamp with float milliseconds
|
# DESC : converts a timestamp or a timestamp with float milliseconds to a human readable format
|
||||||
# to a human readable format
|
|
||||||
# output is in days/hours/minutes/seconds
|
# output is in days/hours/minutes/seconds
|
||||||
function convert_time
|
function convert_time
|
||||||
{
|
{
|
||||||
timestamp=${1};
|
timestamp=${1};
|
||||||
# round to four digits for ms
|
# round to four digits for ms
|
||||||
timestamp=$(printf "%1.4f" $timestamp);
|
timestamp=$(printf "%1.4f" "$timestamp");
|
||||||
# get the ms part and remove any leading 0
|
# get the ms part and remove any leading 0
|
||||||
ms=$(echo ${timestamp} | cut -d "." -f 2 | sed -e 's/^0*//');
|
ms=$(echo "${timestamp}" | cut -d "." -f 2 | sed -e 's/^0*//');
|
||||||
timestamp=$(echo ${timestamp} | cut -d "." -f 1);
|
timestamp=$(echo "${timestamp}" | cut -d "." -f 1);
|
||||||
timegroups=(86400 3600 60 1); # day, hour, min, sec
|
timegroups=(86400 3600 60 1); # day, hour, min, sec
|
||||||
timenames=("d" "h" "m" "s"); # day, hour, min, sec
|
timenames=("d" "h" "m" "s"); # day, hour, min, sec
|
||||||
output=( );
|
output=( );
|
||||||
time_string=;
|
time_string='';
|
||||||
for timeslice in ${timegroups[@]}; do
|
for timeslice in "${timegroups[@]}"; do
|
||||||
# floor for the division, push to output
|
# floor for the division, push to output
|
||||||
if [ ${BC_OK} -eq 1 ]; then
|
if [ ${BC_OK} -eq 1 ]; then
|
||||||
output[${#output[*]}]=$(echo "${timestamp}/${timeslice}" | bc);
|
output[${#output[*]}]=$(echo "${timestamp}/${timeslice}" | bc);
|
||||||
@@ -235,15 +242,19 @@ function convert_time
 done;
 
 for ((i=0; i<${#output[@]}; i++)); do
-if [ ${output[$i]} -gt 0 ] || [ ! -z "$time_string" ]; then
-if [ ! -z "${time_string}" ]; then
+if [ "${output[$i]}" -gt 0 ] || [ -n "$time_string" ]; then
+if [ -n "${time_string}" ]; then
 time_string=${time_string}" ";
 fi;
 time_string=${time_string}${output[$i]}${timenames[$i]};
 fi;
 done;
-if [ ! -z ${ms} ] && [ ${ms} -gt 0 ];; then
-time_string=${time_string}" "${ms}"ms";
+# milliseconds must be filled, but we also check that they are non "nan" string
+# that can appear in the original value
+if [ -n "${ms}" ] && [ "${ms}" != "nan" ]; then
+if [ "${ms}" -gt 0 ]; then
+time_string="${time_string} ${ms}ms";
+fi;
 fi;
 # just in case the time is 0
 if [ -z "${time_string}" ]; then
@@ -253,20 +264,20 @@ function convert_time
|
|||||||
}
|
}
|
||||||
|
|
||||||
# for the auto find, we need to get only the filename, and therefore remove all path info
|
# for the auto find, we need to get only the filename, and therefore remove all path info
|
||||||
db_file=`basename $file`;
|
db_file=$(basename "$file");
|
||||||
# if file is set and exist, but no owner or database are given, use the file name data to get user & database
|
# if file is set and exist, but no owner or database are given, use the file name data to get user & database
|
||||||
if [ -r "$file" ] && ( [ ! "$owner" ] || [ ! "$database" ] || [ ! "$encoding" ] ); then
|
if [ -r "$file" ] && { [ ! "$owner" ] || [ ! "$database" ] || [ ! "$encoding" ]; }; then
|
||||||
# file name format is
|
# file name format is
|
||||||
# <database>.<owner>.<encoding>.<db type>-<version>_<host>_<port>_<date>_<time>_<sequence>
|
# <database>.<owner>.<encoding>.<db type>-<version>_<host>_<port>_<date>_<time>_<sequence>
|
||||||
# we only are interested in the first two
|
# we only are interested in the first two
|
||||||
_database=`echo $db_file | cut -d "." -f 1`;
|
_database=$(echo "${db_file}" | cut -d "." -f 1);
|
||||||
_owner=`echo $db_file | cut -d "." -f 2`;
|
_owner=$(echo "${db_file}" | cut -d "." -f 2);
|
||||||
__encoding=`echo $db_file | cut -d "." -f 3`;
|
__encoding=$(echo "${db_file}" | cut -d "." -f 3);
|
||||||
# set the others as optional
|
# set the others as optional
|
||||||
_ident=`echo $db_file | cut -d "." -f 4 | cut -d "-" -f 2`; # db version first part
|
_ident=$(echo "${db_file}" | cut -d "." -f 4 | cut -d "-" -f 2); # db version first part
|
||||||
_ident=$_ident'.'`echo $db_file | cut -d "." -f 5 | cut -d "_" -f 1`; # db version, second part (after .)
|
_ident=$_ident'.'$(echo "${db_file}" | cut -d "." -f 5 | cut -d "_" -f 1); # db version, second part (after .)
|
||||||
__host=`echo $db_file | cut -d "." -f 4 | cut -d "_" -f 2`;
|
__host=$(echo "${db_file}" | cut -d "." -f 4 | cut -d "_" -f 2);
|
||||||
__port=`echo $db_file | cut -d "." -f 4 | cut -d "_" -f 3`;
|
__port=$(echo "${db_file}" | cut -d "." -f 4 | cut -d "_" -f 3);
|
||||||
# if any of those are not set, override by the file name settings
|
# if any of those are not set, override by the file name settings
|
||||||
if [ ! "$owner" ]; then
|
if [ ! "$owner" ]; then
|
||||||
owner=$_owner;
|
owner=$_owner;
|
||||||
@@ -285,7 +296,7 @@ if [ -r "$file" ] && ( [ ! "$owner" ] || [ ! "$database" ] || [ ! "$encoding" ]
|
|||||||
_host=$__host;
|
_host=$__host;
|
||||||
fi;
|
fi;
|
||||||
if [ ! "$encoding" ]; then
|
if [ ! "$encoding" ]; then
|
||||||
if [ ! -z "$__encoding" ]; then
|
if [ -n "$__encoding" ]; then
|
||||||
encoding=$__encoding;
|
encoding=$__encoding;
|
||||||
else
|
else
|
||||||
encoding=$_encoding;
|
encoding=$_encoding;
|
||||||
@@ -308,43 +319,50 @@ fi;
|
|||||||
|
|
||||||
if [ "$REDHAT" -eq 1 ]; then
|
if [ "$REDHAT" -eq 1 ]; then
|
||||||
# Debian base path
|
# Debian base path
|
||||||
PG_BASE_PATH='/usr/pgsql-';
|
PG_BASE_PATH="/usr/pgsql-";
|
||||||
elif [ "$AMAZON" -eq 1 ]; then
|
|
||||||
PG_BASE_PATH='/usr/lib64/pgsql';
|
|
||||||
else
|
else
|
||||||
# Redhat base path (for non official ones would be '/usr/pgsql-'
|
# Redhat base path (for non official ones would be '/usr/pgsql-'
|
||||||
PG_BASE_PATH='/usr/lib/postgresql/';
|
PG_BASE_PATH="/usr/lib/postgresql/";
|
||||||
fi;
|
fi;
|
||||||
|
|
||||||
# if no ident is given, try to find the default one, if not fall back to pre set one
|
# if no ident is given, try to find the default one, if not fall back to pre set one
|
||||||
if [ ! -z "$ident" ]; then
|
if [ -n "$ident" ]; then
|
||||||
PG_PATH=$PG_BASE_PATH$ident'/bin/';
|
PG_PATH="${PG_BASE_PATH}${ident}/bin/";
|
||||||
if [ ! -d "$PG_PATH" ]; then
|
if [ ! -d "$PG_PATH" ]; then
|
||||||
ident='';
|
ident='';
|
||||||
fi;
|
fi;
|
||||||
fi;
|
else
|
||||||
if [ -z "$ident" ]; then
|
|
||||||
# try to run psql from default path and get the version number
|
# try to run psql from default path and get the version number
|
||||||
ident=$(pgv=$(pg_dump --version| grep "pg_dump" | cut -d " " -f 3); if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then echo "${pgv}" | cut -d "." -f 1; else echo "${pgv}" | cut -d "." -f 1,2; fi );
|
# ident=$(pgv=$(pg_dump --version| grep "pg_dump" | cut -d " " -f 3); if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then echo "${pgv}" | cut -d "." -f 1; else echo "${pgv}" | cut -d "." -f 1,2; fi );
|
||||||
if [ ! -z "$ident" ]; then
|
ident=$(
|
||||||
PG_PATH=$PG_BASE_PATH$ident'/bin/';
|
pgv=$(
|
||||||
else
|
"pg_dump" --version | \
|
||||||
|
grep "pg_dump" | \
|
||||||
|
cut -d " " -f 3
|
||||||
|
);
|
||||||
|
if [[ $(echo "${pgv}" | cut -d "." -f 1) -ge 10 ]]; then
|
||||||
|
echo "${pgv}" | cut -d "." -f 1;
|
||||||
|
else
|
||||||
|
echo "${pgv}" | cut -d "." -f 1,2;
|
||||||
|
fi
|
||||||
|
);
|
||||||
|
if [ -z "$ident" ]; then
|
||||||
# hard setting
|
# hard setting
|
||||||
ident='9.6';
|
ident='15';
|
||||||
PG_PATH=$PG_BASE_PATH'9.6/bin/';
|
|
||||||
fi;
|
fi;
|
||||||
|
PG_PATH="${PG_BASE_PATH}${ident}/bin/'";
|
||||||
fi;
|
fi;
|
||||||
|
|
||||||
PG_DROPDB=$PG_PATH"dropdb";
|
PG_DROPDB="${PG_PATH}dropdb";
|
||||||
PG_CREATEDB=$PG_PATH"createdb";
|
PG_CREATEDB="${PG_PATH}createdb";
|
||||||
PG_CREATELANG=$PG_PATH"createlang";
|
PG_CREATELANG="${PG_PATH}createlang";
|
||||||
PG_RESTORE=$PG_PATH"pg_restore";
|
PG_RESTORE="${PG_PATH}pg_restore";
|
||||||
PG_PSQL=$PG_PATH"psql";
|
PG_PSQL="${PG_PATH}psql";
|
||||||
TEMP_FILE="temp";
|
TEMP_FILE="temp";
|
||||||
LOG_FILE_EXT=$database.`date +"%Y%m%d_%H%M%S"`".log";
|
LOG_FILE_EXT="${database}.$(date +"%Y%m%d_%H%M%S").log";
|
||||||
|
|
||||||
# core abort if no core files found
|
# core abort if no core files found
|
||||||
if [ ! -f $PG_PSQL ] || [ ! -f $PG_DROPDB ] || [ ! -f $PG_CREATEDB ] || [ ! -f $PG_RESTORE ]; then
|
if [ ! -f "$PG_PSQL" ] || [ ! -f "$PG_DROPDB" ] || [ ! -f "$PG_CREATEDB" ] || [ ! -f "$PG_RESTORE" ]; then
|
||||||
echo "One of the core binaries (psql, pg_dump, createdb, pg_restore) could not be found.";
|
echo "One of the core binaries (psql, pg_dump, createdb, pg_restore) could not be found.";
|
||||||
echo "Search Path: ${PG_PATH}";
|
echo "Search Path: ${PG_PATH}";
|
||||||
echo "Perhaps manual ident set with -i is necessary";
|
echo "Perhaps manual ident set with -i is necessary";
|
||||||
@@ -354,11 +372,13 @@ fi;
|
|||||||
|
|
||||||
# check if port / host settings are OK
|
# check if port / host settings are OK
|
||||||
# if I cannot connect with user postgres to template1, the restore won't work
|
# if I cannot connect with user postgres to template1, the restore won't work
|
||||||
output=`echo "SELECT version();" | $PG_PSQL -U postgres $host $port template1 -q -t -X -A -F "," 2>&1`;
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
found=`echo "$output" | grep "PostgreSQL"`;
|
_PG_PARAMS+=("-U" "postgres" "template1" "-q" "-t" "-X" "-A" "-F" "," "-c" "SELECT version();");
|
||||||
|
_output=$("${PG_PSQL}" "${PG_PARAMS[@]}" 2>&1);
|
||||||
|
found=$(echo "$_output" | grep "PostgreSQL");
|
||||||
# if the output does not have the PG version string, we have an error and abort
|
# if the output does not have the PG version string, we have an error and abort
|
||||||
if [ -z "$found" ]; then
|
if [ -z "$found" ]; then
|
||||||
echo "Cannot connect to the database: $output";
|
echo "Cannot connect to the database: $_output";
|
||||||
exit 1;
|
exit 1;
|
||||||
fi;
|
fi;
|
||||||
|
|
||||||
@@ -370,66 +390,92 @@ if [ $NO_ASK -eq 1 ]; then
|
|||||||
go='yes';
|
go='yes';
|
||||||
else
|
else
|
||||||
echo "Continue? type 'yes'";
|
echo "Continue? type 'yes'";
|
||||||
read go;
|
read -r go;
|
||||||
fi;
|
fi;
|
||||||
if [ "$go" != 'yes' ]; then
|
if [ "$go" != 'yes' ]; then
|
||||||
echo "Aborted";
|
echo "Aborted";
|
||||||
exit;
|
exit;
|
||||||
else
|
else
|
||||||
start_time=`date +"%F %T"`;
|
start_time=$(date +"%F %T");
|
||||||
START=`date +'%s'`;
|
START=$(date +'%s');
|
||||||
echo "Drop DB $database [$_host:$_port] @ $start_time";
|
echo "Drop DB $database [$_host:$_port] @ $start_time";
|
||||||
# DROP DATABASE
|
# DROP DATABASE
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "${database}");
|
||||||
if [ $DRY_RUN -eq 0 ]; then
|
if [ $DRY_RUN -eq 0 ]; then
|
||||||
$PG_DROPDB -U postgres $host $port $database;
|
# $PG_DROPDB -U postgres $host $port $database;
|
||||||
|
"${PG_DROPDB}" "${PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo $PG_DROPDB -U postgres $host $port $database;
|
echo "${PG_DROPDB} ${PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
# CREATE DATABASE
|
# CREATE DATABASE
|
||||||
echo "Create DB $database with $owner and encoding $encoding on [$_host:$_port] @ `date +"%F %T"`";
|
echo "Create DB $database with $owner and encoding $encoding on [$_host:$_port] @ $(date +"%F %T")";
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "-O" "${owner}" "-E" "${encoding}" "-T" "${TEMPLATEDB}" "${database}");
|
||||||
if [ $DRY_RUN -eq 0 ]; then
|
if [ $DRY_RUN -eq 0 ]; then
|
||||||
$PG_CREATEDB -U postgres -O $owner -E $encoding -T $TEMPLATEDB $host $port $database;
|
# $PG_CREATEDB -U postgres -O $owner -E $encoding -T $TEMPLATEDB $host $port $database;
|
||||||
|
"${PG_CREATEDB}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo $PG_CREATEDB -U postgres -O $owner -E $encoding -T $TEMPLATEDB $host $port $database;
|
echo "${PG_CREATEDB} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
# CREATE plpgsql LANG
|
# CREATE plpgsql LANG
|
||||||
if [ -f $PG_CREATELANG ]; then
|
if [ -f "$PG_CREATELANG" ]; then
|
||||||
echo "Create plpgsql lang in DB $database on [$_host:$_port] @ `date +"%F %T"`";
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "plpgsql" "${database}");
|
||||||
|
echo "Create plpgsql lang in DB $database on [$_host:$_port] @ $(date +"%F %T")";
|
||||||
if [ $DRY_RUN -eq 0 ]; then
|
if [ $DRY_RUN -eq 0 ]; then
|
||||||
$PG_CREATELANG -U postgres plpgsql $host $port $database;
|
# $PG_CREATELANG -U postgres plpgsql $host $port $database;
|
||||||
|
"${PG_CREATELANG}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo $PG_CREATELANG -U postgres plpgsql $host $port $database;
|
echo "${PG_CREATELANG} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
fi;
|
fi;
|
||||||
# RESTORE DATA
|
# RESTORE DATA
|
||||||
echo "Restore data from $file to DB $database on [$_host:$_port] with Jobs $MAX_JOBS @ `date +"%F %T"`";
|
echo "Restore data from $file to DB $database on [$_host:$_port] with Jobs $MAX_JOBS @ $(date +"%F %T")";
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("${PG_PARAM_ROLE[@]}")
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "-d" "${database}" "-F" "c" "-v" "-c" "${schema}" "-j" "${MAX_JOBS}" "${file}");
|
||||||
if [ $DRY_RUN -eq 0 ]; then
|
if [ $DRY_RUN -eq 0 ]; then
|
||||||
$PG_RESTORE -U postgres -d $database -F c -v -c $schema -j $MAX_JOBS $host $port $role $file 2>restore_errors.$LOG_FILE_EXT;
|
# $PG_RESTORE -U postgres -d $database -F c -v -c $schema -j $MAX_JOBS $host $port $role $file 2>restore_errors.$LOG_FILE_EXT;
|
||||||
|
"${PG_RESTORE}" "${_PG_PARAMS[@]}" 2>"restore_errors.${LOG_FILE_EXT}";
|
||||||
else
|
else
|
||||||
echo $PG_RESTORE -U postgres -d $database -F c -v -c $schema -j $MAX_JOBS $host $port $role $file 2>restore_errors.$LOG_FILE_EXT;
|
echo "${PG_RESTORE} ${_PG_PARAMS[*]} 2>restore_errors.${LOG_FILE_EXT}";
|
||||||
fi;
|
fi;
|
||||||
# BUG FIX FOR POSTGRESQL 9.6.2 db_dump
|
# BUG FIX FOR POSTGRESQL 9.6.2 db_dump
|
||||||
# it does not dump the default public ACL so the owner of the DB cannot access the data, check if the ACL dump is missing and do a basic restore
|
# it does not dump the default public ACL so the owner of the DB cannot access the data, check if the ACL dump is missing and do a basic restore
|
||||||
if [ -z $($PG_RESTORE -l $file | grep -- "ACL - public postgres") ]; then
|
# if [ -z "$("$PG_RESTORE" -l "$file" | grep -- "ACL - public postgres")" ]; then
|
||||||
echo "Fixing missing basic public schema ACLs from DB $database [$_host:$_port] @ `date +"%F %T"`";
|
if ! "$("${PG_RESTORE}" -l "$file" | grep -q -- "ACL - public postgres")"; then
|
||||||
|
echo "Fixing missing basic public schema ACLs from DB $database [$_host:$_port] @ $(date +"%F %T")";
|
||||||
|
# echo "GRANT USAGE ON SCHEMA public TO public;" | $PG_PSQL -U postgres -Atq $host $port $database;
|
||||||
|
# echo "GRANT CREATE ON SCHEMA public TO public;" | $PG_PSQL -U postgres -Atq $host $port $database;
|
||||||
# grant usage on schema public to public;
|
# grant usage on schema public to public;
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "-Atq" "-c" "GRANT USAGE ON SCHEMA public TO public;" "${database}");
|
||||||
|
"${PG_PSQL}" "${_PG_PARAMS[@]}";
|
||||||
# grant create on schema public to public;
|
# grant create on schema public to public;
|
||||||
echo "GRANT USAGE ON SCHEMA public TO public;" | $PG_PSQL -U postgres -Atq $host $port $database;
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
echo "GRANT CREATE ON SCHEMA public TO public;" | $PG_PSQL -U postgres -Atq $host $port $database;
|
_PG_PARAMS+=("-U" "postgres" "-Atq" "-c" "GRANT CREATE ON SCHEMA public TO public;" "${database}");
|
||||||
|
"${PG_PSQL}" "${_PG_PARAMS[@]}";
|
||||||
fi;
|
fi;
|
||||||
# SEQUENCE RESET DATA COLLECTION
|
# SEQUENCE RESET DATA COLLECTION
|
||||||
echo "Resetting all sequences from DB $database [$_host:$_port] @ `date +"%F %T"`";
|
echo "Resetting all sequences from DB $database [$_host:$_port] @ $(date +"%F %T")";
|
||||||
reset_query="SELECT 'SELECT SETVAL(' ||quote_literal(S.relname)|| ', MAX(' ||quote_ident(C.attname)|| ') ) FROM ' ||quote_ident(T.relname)|| ';' FROM pg_class AS S, pg_depend AS D, pg_class AS T, pg_attribute AS C WHERE S.relkind = 'S' AND S.oid = D.objid AND D.refobjid = T.oid AND D.refobjid = C.attrelid AND D.refobjsubid = C.attnum ORDER BY S.relname;";
|
reset_query="SELECT 'SELECT SETVAL(' ||quote_literal(S.relname)|| ', MAX(' ||quote_ident(C.attname)|| ') ) FROM ' ||quote_ident(T.relname)|| ';' FROM pg_class AS S, pg_depend AS D, pg_class AS T, pg_attribute AS C WHERE S.relkind = 'S' AND S.oid = D.objid AND D.refobjid = T.oid AND D.refobjid = C.attrelid AND D.refobjsubid = C.attnum ORDER BY S.relname;";
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-U" "postgres" "-Atq" "-o" "${TEMP_FILE}" "-c" "${reset_query}" "${database}");
|
||||||
|
_PG_PARAMS_OUT=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS_OUT+=("-U" "postgres" "-e" "-f" "${TEMP_FILE}" "${database}");
|
||||||
if [ $DRY_RUN -eq 0 ]; then
|
if [ $DRY_RUN -eq 0 ]; then
|
||||||
echo "${reset_query}" | $PG_PSQL -U postgres -Atq $host $port -o $TEMP_FILE $database
|
# echo "${reset_query}" | $PG_PSQL -U postgres -Atq $host $port -o $TEMP_FILE $database
|
||||||
$PG_PSQL -U postgres $host $port -e -f $TEMP_FILE $database 1>output_sequence.$LOG_FILE_EXT 2>errors_sequence.$database.$LOG_FILE_EXT;
|
"${PG_PSQL}" "${_PG_PARAMS[@]}";
|
||||||
rm $TEMP_FILE;
|
# $PG_PSQL -U postgres $host $port -e -f $TEMP_FILE $database 1>output_sequence.$LOG_FILE_EXT 2>errors_sequence.$database.$LOG_FILE_EXT;
|
||||||
|
"${PG_PSQL}" "${_PG_PARAMS_OUT[@]}" 1>"output_sequence.${LOG_FILE_EXT}" 2>"errors_sequence.${database}.${LOG_FILE_EXT}";
|
||||||
|
rm "${TEMP_FILE}";
|
||||||
else
|
else
|
||||||
echo "${reset_query}";
|
echo "${reset_query}";
|
||||||
echo $PG_PSQL -U postgres $host $port -e -f $TEMP_FILE $database 1>output_sequence.$LOG_FILE_EXT 2>errors_sequence.$database.$LOG_FILE_EXT;
|
echo "${PG_PSQL} ${_PG_PARAMS_OUT[*]} 1>output_sequence.${LOG_FILE_EXT} 2>errors_sequence.${database}.${LOG_FILE_EXT}";
|
||||||
fi;
|
fi;
|
||||||
echo "Restore of data $file for DB $database [$_host:$_port] finished";
|
echo "Restore of data $file for DB $database [$_host:$_port] finished";
|
||||||
DURATION=$[ `date +'%s'`-$START ];
|
DURATION=$(($(date "+%s")-START));
|
||||||
echo "Start at $start_time and end at `date +"%F %T"` and ran for $(convert_time ${DURATION})";
|
echo "Start at $start_time and end at $(date +"%F %T") and ran for $(convert_time ${DURATION})";
|
||||||
echo "=== END RESTORE" >>restore_errors.$LOG_FILE_EXT;
|
echo "=== END RESTORE" >>"restore_errors.${LOG_FILE_EXT}";
|
||||||
fi;
|
fi;
|
||||||
|
|||||||
@@ -1,11 +1,15 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Author: Clemens Schwaighofer
|
||||||
|
# Description:
|
||||||
|
# Drop and restore databases from a folder for dump files created by pg_db_dump_file.sh
|
||||||
|
|
||||||
function usage ()
|
function usage ()
|
||||||
{
|
{
|
||||||
cat <<- EOT
|
cat <<- EOT
|
||||||
Restores a list of database dumps from a folder to a database server
|
Restores a list of database dumps from a folder to a database server
|
||||||
|
|
||||||
Usage: ${0##/*/} -f <dump folder> [-j <JOBS>] [-e <ENCODING>] [-h <HOST>] [-r|-a] [-g] [-n]
|
Usage: ${0##/*/} -f <dump folder> [-j <JOBS>] [-e <ENCODING>] [-h <HOST>] [-r] [-g] [-n]
|
||||||
|
|
||||||
-e <ENCODING>: override global encoding, will be overruled by per file encoding
|
-e <ENCODING>: override global encoding, will be overruled by per file encoding
|
||||||
-p <PORT>: override default port from file.
|
-p <PORT>: override default port from file.
|
||||||
@@ -13,7 +17,6 @@ function usage ()
|
|||||||
-f: dump folder source. Where the database dump files are located. This is a must set option
|
-f: dump folder source. Where the database dump files are located. This is a must set option
|
||||||
-j <JOBS>: Run how many jobs Parallel. If not set, 2 jobs are run parallel
|
-j <JOBS>: Run how many jobs Parallel. If not set, 2 jobs are run parallel
|
||||||
-r: use redhat base paths instead of debian
|
-r: use redhat base paths instead of debian
|
||||||
-a: use amazon base paths instead of debian
|
|
||||||
-g: do not import globals file
|
-g: do not import globals file
|
||||||
-n: dry run, do not import or change anything
|
-n: dry run, do not import or change anything
|
||||||
EOT
|
EOT
|
||||||
@@ -26,7 +29,6 @@ HOST='';
|
|||||||
_encoding='UTF8';
|
_encoding='UTF8';
|
||||||
set_encoding='';
|
set_encoding='';
|
||||||
REDHAT=0;
|
REDHAT=0;
|
||||||
AMAZON=0;
|
|
||||||
IMPORT_GLOBALS=1;
|
IMPORT_GLOBALS=1;
|
||||||
TEMPLATEDB='template0'; # truly empty for restore
|
TEMPLATEDB='template0'; # truly empty for restore
|
||||||
DUMP_FOLDER='';
|
DUMP_FOLDER='';
|
||||||
@@ -35,8 +37,10 @@ BC='/usr/bin/bc';
|
|||||||
PORT_REGEX="^[0-9]{4,5}$";
|
PORT_REGEX="^[0-9]{4,5}$";
|
||||||
OPTARG_REGEX="^-";
|
OPTARG_REGEX="^-";
|
||||||
DRY_RUN=0;
|
DRY_RUN=0;
|
||||||
|
PG_PARAM_HOST=();
|
||||||
|
PG_PARAM_PORT=();
|
||||||
# options check
|
# options check
|
||||||
while getopts ":f:j:h:p:e:granm" opt; do
|
while getopts ":f:j:h:p:e:grnm" opt; do
|
||||||
# pre test for unfilled
|
# pre test for unfilled
|
||||||
if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
|
if [ "${opt}" = ":" ] || [[ "${OPTARG-}" =~ ${OPTARG_REGEX} ]]; then
|
||||||
if [ "${opt}" = ":" ]; then
|
if [ "${opt}" = ":" ]; then
|
||||||
@@ -68,47 +72,53 @@ while getopts ":f:j:h:p:e:granm" opt; do
|
|||||||
esac
|
esac
|
||||||
fi;
|
fi;
|
||||||
case $opt in
|
case $opt in
|
||||||
f|file)
|
# f|file)
|
||||||
|
f)
|
||||||
DUMP_FOLDER=$OPTARG;
|
DUMP_FOLDER=$OPTARG;
|
||||||
;;
|
;;
|
||||||
j|jobs)
|
# j|jobs)
|
||||||
|
j)
|
||||||
MAX_JOBS=${OPTARG};
|
MAX_JOBS=${OPTARG};
|
||||||
;;
|
;;
|
||||||
e|encoding)
|
# e|encoding)
|
||||||
|
e)
|
||||||
if [ -z "$encoding" ]; then
|
if [ -z "$encoding" ]; then
|
||||||
encoding=$OPTARG;
|
encoding=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
h|hostname)
|
# h|hostname)
|
||||||
|
h)
|
||||||
if [ -z "$host" ]; then
|
if [ -z "$host" ]; then
|
||||||
# do not set if local name (uses socket)
|
# do not set if local name (uses socket)
|
||||||
if [ "$OPTARG" != "local" ]; then
|
if [ "$OPTARG" != "local" ]; then
|
||||||
host='-h '$OPTARG;
|
PG_PARAM_HOST=("-h" "${OPTARG}");
|
||||||
fi;
|
fi;
|
||||||
_host=$OPTARG;
|
_host=$OPTARG;
|
||||||
HOST=$OPRTARG;
|
HOST=$OPRTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
p|port)
|
# p|port)
|
||||||
|
p)
|
||||||
if [ -z "$port" ]; then
|
if [ -z "$port" ]; then
|
||||||
port='-p '$OPTARG;
|
PG_PARAM_PORT=("-p" "${OPTARG}");
|
||||||
_port=$OPTARG;
|
_port=$OPTARG;
|
||||||
PORT=$OPTARG;
|
PORT=$OPTARG;
|
||||||
fi;
|
fi;
|
||||||
;;
|
;;
|
||||||
g|globals)
|
# g|globals)
|
||||||
|
g)
|
||||||
IMPORT_GLOBALS=0;
|
IMPORT_GLOBALS=0;
|
||||||
;;
|
;;
|
||||||
r|redhat)
|
# r|redhat)
|
||||||
|
r)
|
||||||
REDHAT=1;
|
REDHAT=1;
|
||||||
;;
|
;;
|
||||||
a|amazon)
|
# n|dry-run)
|
||||||
AMAZON=1;
|
n)
|
||||||
;;
|
|
||||||
n|dry-run)
|
|
||||||
DRY_RUN=1;
|
DRY_RUN=1;
|
||||||
;;
|
;;
|
||||||
m|help)
|
# m|help)
|
||||||
|
m)
|
||||||
usage;
|
usage;
|
||||||
exit 0;
|
exit 0;
|
||||||
;;
|
;;
|
||||||
@@ -120,19 +130,16 @@ while getopts ":f:j:h:p:e:granm" opt; do
|
|||||||
esac;
|
esac;
|
||||||
done;
|
done;
|
||||||
|
|
||||||
if [ "$REDHAT" -eq 1 ] && [ "$AMAZON" -eq 1 ]; then
|
if [ "${ERROR}" -eq 1 ]; then
|
||||||
echo "You cannot set the -a and -r flag at the same time";
|
exit 0;
|
||||||
fi;
|
fi;
|
||||||
|
|
||||||
if [ "$REDHAT" -eq 1 ]; then
|
if [ "$REDHAT" -eq 1 ]; then
|
||||||
# Redhat base path (for non official ones would be '/usr/pgsql-'
|
# Redhat base path (for non official ones would be '/usr/pgsql-'
|
||||||
DBPATH_BASE='/usr/pgsql-'
|
PG_BASE_PATH='/usr/pgsql-'
|
||||||
elif [ "$AMAZON" -eq 1 ]; then
|
|
||||||
# Amazon paths (lib64 default amazon package)
|
|
||||||
DBPATH_BASE='/usr/lib64/pgsql';
|
|
||||||
else
|
else
|
||||||
# Debian base path
|
# Debian base path
|
||||||
DBPATH_BASE='/usr/lib/postgresql/';
|
PG_BASE_PATH='/usr/lib/postgresql/';
|
||||||
fi;
|
fi;
|
||||||
|
|
||||||
# check that the port is a valid number
|
# check that the port is a valid number
|
||||||
@@ -145,16 +152,15 @@ NUMBER_REGEX="^[0-9]{1,}$";
|
|||||||
# find the max allowed jobs based on the cpu count
|
# find the max allowed jobs based on the cpu count
|
||||||
# because setting more than this is not recommended
|
# because setting more than this is not recommended
|
||||||
# so this fails in vmware hosts were we have random cpus assigned
|
# so this fails in vmware hosts were we have random cpus assigned
|
||||||
cpu=$(cat /proc/cpuinfo | grep "processor" | wc -l);
|
_max_jobs=$(nproc --all);
|
||||||
_max_jobs=${cpu##*: };
|
|
||||||
# if the MAX_JOBS is not number or smaller 1 or greate _max_jobs
|
# if the MAX_JOBS is not number or smaller 1 or greate _max_jobs
|
||||||
if [ ! -z "${MAX_JOBS}" ]; then
|
if [ -n "${MAX_JOBS}" ]; then
|
||||||
# check that it is a valid number
|
# check that it is a valid number
|
||||||
if [[ ! ${MAX_JOBS} =~ ${NUMBER_REGEX} ]]; then
|
if [[ ! ${MAX_JOBS} =~ ${NUMBER_REGEX} ]]; then
|
||||||
echo "Please enter a number for the -j option";
|
echo "Please enter a number for the -j option";
|
||||||
exit 1;
|
exit 1;
|
||||||
fi;
|
fi;
|
||||||
if [ ${MAX_JOBS} -lt 1 ] || [ ${MAX_JOBS} -gt ${_max_jobs} ]; then
|
if [ "${MAX_JOBS}" -lt 1 ] || [ "${MAX_JOBS}" -gt "${_max_jobs}" ]; then
|
||||||
echo "The value for the jobs option -j cannot be smaller than 1 or bigger than ${_max_jobs}";
|
echo "The value for the jobs option -j cannot be smaller than 1 or bigger than ${_max_jobs}";
|
||||||
exit 1;
|
exit 1;
|
||||||
fi;
|
fi;
|
||||||
@@ -202,15 +208,15 @@ function convert_time
|
|||||||
{
|
{
|
||||||
timestamp=${1};
|
timestamp=${1};
|
||||||
# round to four digits for ms
|
# round to four digits for ms
|
||||||
timestamp=$(printf "%1.4f" $timestamp);
|
timestamp=$(printf "%1.4f" "$timestamp");
|
||||||
# get the ms part and remove any leading 0
|
# get the ms part and remove any leading 0
|
||||||
ms=$(echo ${timestamp} | cut -d "." -f 2 | sed -e 's/^0*//');
|
ms=$(echo "${timestamp}" | cut -d "." -f 2 | sed -e 's/^0*//');
|
||||||
timestamp=$(echo ${timestamp} | cut -d "." -f 1);
|
timestamp=$(echo "${timestamp}" | cut -d "." -f 1);
|
||||||
timegroups=(86400 3600 60 1); # day, hour, min, sec
|
timegroups=(86400 3600 60 1); # day, hour, min, sec
|
||||||
timenames=("d" "h" "m" "s"); # day, hour, min, sec
|
timenames=("d" "h" "m" "s"); # day, hour, min, sec
|
||||||
output=( );
|
output=( );
|
||||||
time_string='';
|
time_string='';
|
||||||
for timeslice in ${timegroups[@]}; do
|
for timeslice in "${timegroups[@]}"; do
|
||||||
# floor for the division, push to output
|
# floor for the division, push to output
|
||||||
if [ ${BC_OK} -eq 1 ]; then
|
if [ ${BC_OK} -eq 1 ]; then
|
||||||
output[${#output[*]}]=$(echo "${timestamp}/${timeslice}" | bc);
|
output[${#output[*]}]=$(echo "${timestamp}/${timeslice}" | bc);
|
||||||
@@ -222,16 +228,18 @@ function convert_time
|
|||||||
done;
|
done;
|
||||||
|
|
||||||
for ((i=0; i<${#output[@]}; i++)); do
|
for ((i=0; i<${#output[@]}; i++)); do
|
||||||
if [ ${output[$i]} -gt 0 ] || [ ! -z "$time_string" ]; then
|
if [ "${output[$i]}" -gt 0 ] || [ -n "$time_string" ]; then
|
||||||
if [ ! -z "${time_string}" ]; then
|
if [ -n "${time_string}" ]; then
|
||||||
time_string=${time_string}" ";
|
time_string=${time_string}" ";
|
||||||
fi;
|
fi;
|
||||||
time_string=${time_string}${output[$i]}${timenames[$i]};
|
time_string=${time_string}${output[$i]}${timenames[$i]};
|
||||||
fi;
|
fi;
|
||||||
done;
|
done;
|
||||||
if [ ! -z ${ms} ]; then
|
# milliseconds must be filled, but we also check that they are non "nan" string
|
||||||
if [ ${ms} -gt 0 ]; then
|
# that can appear in the original value
|
||||||
time_string=${time_string}" "${ms}"ms";
|
if [ -n "${ms}" ] && [ "${ms}" != "nan" ]; then
|
||||||
|
if [ "${ms}" -gt 0 ]; then
|
||||||
|
time_string="${time_string} ${ms}ms";
|
||||||
fi;
|
fi;
|
||||||
fi;
|
fi;
|
||||||
# just in case the time is 0
|
# just in case the time is 0
|
||||||
@@ -242,25 +250,22 @@ function convert_time
|
|||||||
}
|
}
|
||||||
|
|
||||||
# default version (for folder)
|
# default version (for folder)
|
||||||
DBPATH_VERSION='9.6/';
|
PG_PATH_VERSION='15/';
|
||||||
# if amazon remove "." from version
|
PG_PATH_BIN='bin/';
|
||||||
if [ "${AMAZON}" -eq 1 ]; then
|
|
||||||
DBPATH_VERSION=$(echo "${DBPATH_VERSION}" | sed -e 's/\.//');
|
|
||||||
fi;
|
|
||||||
DBPATH_BIN='bin/';
|
|
||||||
# postgresql binaries
|
# postgresql binaries
|
||||||
DROPDB="dropdb";
|
PG_DROPDB="dropdb";
|
||||||
CREATEDB="createdb";
|
PG_CREATEDB="createdb";
|
||||||
CREATELANG="createlang";
|
PG_CREATELANG="createlang";
|
||||||
PGRESTORE="pg_restore";
|
PG_RESTORE="pg_restore";
|
||||||
CREATEUSER="createuser";
|
PG_CREATEUSER="createuser";
|
||||||
PSQL="psql";
|
PG_PSQL="psql";
|
||||||
|
|
||||||
# default port and host
|
# default port and host
|
||||||
EXCLUDE_LIST="pg_globals"; # space separated
|
EXCLUDE_LIST="pg_globals"; # space separated
|
||||||
LOGFILE="tee -a $LOGS/PG_RESTORE_DB_FILE.`date +"%Y%m%d_%H%M%S"`.log";
|
LOGFILE="tee -a "$LOGS/PG_RESTORE_DB_FILE.$(date +"%Y%m%d_%H%M%S").log"";
|
||||||
|
|
||||||
# get the count for DBs to import
|
# get the count for DBs to import
|
||||||
db_count=`find $DUMP_FOLDER -name "*.sql" -print | wc -l`;
|
db_count=$(find "${DUMP_FOLDER}" -name "*.sql" -print | wc -l);
|
||||||
# start info
|
# start info
|
||||||
if [ "${DUMP_FOLDER}" = "." ]; then
|
if [ "${DUMP_FOLDER}" = "." ]; then
|
||||||
_DUMP_FOLDER="[current folder]";
|
_DUMP_FOLDER="[current folder]";
|
||||||
@@ -284,88 +289,94 @@ echo "= import logs: $LOGS" | $LOGFILE;
|
|||||||
echo "" | $LOGFILE;
|
echo "" | $LOGFILE;
|
||||||
pos=1;
|
pos=1;
|
||||||
# go through all the files an import them into the database
|
# go through all the files an import them into the database
|
||||||
MASTERSTART=`date +'%s'`;
|
MASTERSTART=$(date +"%s");
|
||||||
master_start_time=`date +"%F %T"`;
|
master_start_time=$(date +"%F %T");
|
||||||
# first import the pg_globals file if this is requested, default is yes
|
# first import the pg_globals file if this is requested, default is yes
|
||||||
if [ "$IMPORT_GLOBALS" -eq 1 ]; then
|
if [ "$IMPORT_GLOBALS" -eq 1 ]; then
|
||||||
start_time=`date +"%F %T"`;
|
start_time=$(date +"%F %T");
|
||||||
START=`date +'%s'`;
|
START=$(date +"%s");
|
||||||
# get the pg_globals file
|
# get the pg_globals file
|
||||||
echo "=[Globals Restore]=START=[$start_time]==================================================>" | $LOGFILE;
|
echo "=[Globals Restore]=START=[$start_time]==================================================>" | $LOGFILE;
|
||||||
# get newest and only the first one
|
# get newest and only the first one
|
||||||
file=`ls -1t $DUMP_FOLDER/pg_global* | head -1`;
|
# file=$(ls -1t "$DUMP_FOLDER/pg_global"* | head -1);
|
||||||
filename=`basename $file`;
|
file=$(find "$DUMP_FOLDER" -name "pg_global*" -type f -printf "%Ts\t%p\n" | sort -nr | head -1);
|
||||||
|
filename=$(basename "$file");
|
||||||
# the last _ is for version 10 or higher
|
# the last _ is for version 10 or higher
|
||||||
version=`echo $filename | cut -d "." -f 4 | cut -d "-" -f 2 | cut -d "_" -f 1`; # db version, without prefix of DB type
|
# db version, without prefix of DB type
|
||||||
|
version=$(echo "$filename" | cut -d "." -f 4 | cut -d "-" -f 2 | cut -d "_" -f 1);
|
||||||
# if this is < 10 then we need the second part too
|
# if this is < 10 then we need the second part too
|
||||||
if [ ${version} -lt 10 ]; then
|
if [ "${version}" -lt 10 ]; then
|
||||||
version=$version'.'`echo $filename | cut -d "." -f 5 | cut -d "_" -f 1`; # db version, second part (after .)
|
# db version, second part (after .)
|
||||||
|
version=$version'.'$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 1);
|
||||||
fi;
|
fi;
|
||||||
# if amazon remove "." from version
|
# hostname of original DB, can be used as target host too
|
||||||
if [ "${AMAZON}" -eq 1 ]; then
|
__host=$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 2);
|
||||||
version=$(echo "${version}" | sed -e 's/\.//');
|
# port of original DB, can be used as target port too
|
||||||
fi;
|
__port=$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 3);
|
||||||
__host=`echo $filename | cut -d "." -f 5 | cut -d "_" -f 2`; # hostname of original DB, can be used as target host too
|
|
||||||
__port=`echo $filename | cut -d "." -f 5 | cut -d "_" -f 3`; # port of original DB, can be used as target port too
|
|
||||||
# override file port over given port if it differs and is valid
|
# override file port over given port if it differs and is valid
|
||||||
if [ -z $_port ] && [ "$__port" != $_port ] && [[ $__port =~ $PORT_REGEX ]] ; then
|
if [ -z "$_port" ] && [ "$__port" != "$_port" ] && [[ "$__port" =~ $PORT_REGEX ]] ; then
|
||||||
_port=$__port;
|
_port=$__port;
|
||||||
port='-p '$_port;
|
PG_PARAM_PORT=("-p" "$_port");
|
||||||
fi;
|
fi;
|
||||||
if [ -z "$_host" ] && [ "$__host" != "local" ]; then
|
if [ -z "$_host" ] && [ "$__host" != "local" ]; then
|
||||||
_host=$__host;
|
_host=$__host;
|
||||||
host='-h '$_host;
|
PG_PARAM_HOST=("-h" "${_host}");
|
||||||
fi;
|
fi;
|
||||||
# create the path to the DB from the DB version in the backup file
|
# create the path to the DB from the DB version in the backup file
|
||||||
if [ ! -z "$version" ]; then
|
if [ -n "$version" ]; then
|
||||||
DBPATH_VERSION_LOCAL=$version'/';
|
PG_PATH_VERSION_LOCAL="${version}/";
|
||||||
else
|
else
|
||||||
DBPATH_VERSION_LOCAL=$DBPATH_VERSION;
|
PG_PATH_VERSION_LOCAL="${PG_PATH_VERSION}";
|
||||||
fi;
|
fi;
|
||||||
DBPATH=$DBPATH_BASE$DBPATH_VERSION_LOCAL$DBPATH_BIN;
|
PG_PATH="${PG_BASE_PATH}${PG_PATH_VERSION_LOCAL}${PG_PATH_BIN}";
|
||||||
echo "+ Restore globals file: $filename to [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
echo "+ Restore globals file: $filename to [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("-U" "postgres");
|
||||||
|
_PG_PARAMS+=("${PG_PARAM_HOST[@]}");
|
||||||
|
_PG_PARAMS+=("${PG_PARAM_PORT[@]}");
|
||||||
|
_PG_PARAMS+=("-f" "$file" "-e" "-q" "-X" "template1");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$DBPATH$PSQL -U postgres $host $port -f $file -e -q -X template1 | $LOGFILE;
|
"${PG_PATH}${PG_PSQL}" "${_PG_PARAMS[@]}" | $LOGFILE;
|
||||||
else
|
else
|
||||||
echo "$DBPATH$PSQL -U postgres $host $port -f $file -e -q -X template1" | $LOGFILE;
|
echo "${PG_PATH}${PG_PSQL} ${_PG_PARAMS[*]}" | $LOGFILE;
|
||||||
fi;
|
fi;
|
||||||
DURATION=$[ `date +'%s'`-$START ];
|
DURATION=$(($(date +"%s")-START));
|
||||||
printf "=[Globals Restore]=END===[%s]========================================================>\n" "$(convert_time ${DURATION})" | $LOGFILE;
|
printf "=[Globals Restore]=END===[%s]========================================================>\n" "$(convert_time ${DURATION})" | $LOGFILE;
|
||||||
fi;
|
fi;
|
||||||
for file in $DUMP_FOLDER/*.sql; do
|
for file in "$DUMP_FOLDER/"*.sql; do
|
||||||
start_time=`date +"%F %T"`;
|
start_time=$(date +"%F %T");
|
||||||
START=`date +'%s'`;
|
START=$(date +"%s");
|
||||||
echo "=[$pos/$db_count]=START=[$start_time]==================================================>" | $LOGFILE;
|
echo "=[$pos/$db_count]=START=[$start_time]==================================================>" | $LOGFILE;
|
||||||
# the encoding
|
# the encoding
|
||||||
set_encoding='';
|
set_encoding='';
|
||||||
# get the filename
|
# get the filename
|
||||||
filename=`basename $file`;
|
filename=$(basename "$file");
|
||||||
# get the databse, user
|
# get the databse, user
|
||||||
# default file name is <database>.<owner>.<encoding>.<type>-<version>_<host>_<port>_<date>_<time>_<sequence>
|
# default file name is <database>.<owner>.<encoding>.<type>-<version>_<host>_<port>_<date>_<time>_<sequence>
|
||||||
database=`echo $filename | cut -d "." -f 1`;
|
database=$(echo "$filename" | cut -d "." -f 1);
|
||||||
owner=`echo $filename | cut -d "." -f 2`;
|
owner=$(echo "$filename" | cut -d "." -f 2);
|
||||||
__encoding=`echo $filename | cut -d "." -f 3`;
|
__encoding=$(echo "$filename" | cut -d "." -f 3);
|
||||||
# the last _ part if for version 10
|
# the last _ part if for version 10
|
||||||
version=`echo $filename | cut -d "." -f 4 | cut -d "-" -f 2 | cut -d "_" -f 1`; # db version, without prefix of DB type
|
# db version, without prefix of DB type
|
||||||
|
version=$(echo "$filename" | cut -d "." -f 4 | cut -d "-" -f 2 | cut -d "_" -f 1);
|
||||||
# if this is < 10 then we need the second part too
|
# if this is < 10 then we need the second part too
|
||||||
if [ ${version} -lt 10 ]; then
|
if [ "${version}" -lt 10 ]; then
|
||||||
version=$version'.'`echo $filename | cut -d "." -f 5 | cut -d "_" -f 1`; # db version, second part (after .)
|
# db version, second part (after .)
|
||||||
|
version=$version'.'$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 1);
|
||||||
fi;
|
fi;
|
||||||
# if amazon remove "." from version
|
# hostname of original DB, can be used as target host too
|
||||||
if [ "${AMAZON}" -eq 1 ]; then
|
__host=$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 2);
|
||||||
version=$(echo "${version}" | sed -e 's/\.//');
|
# port of original DB, can be used as target port too
|
||||||
fi;
|
__port=$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 3);
|
||||||
__host=`echo $filename | cut -d "." -f 5 | cut -d "_" -f 2`; # hostname of original DB, can be used as target host too
|
# backup date and time, plus sequence
|
||||||
__port=`echo $filename | cut -d "." -f 5 | cut -d "_" -f 3`; # port of original DB, can be used as target port too
|
# other=$(echo "$filename" | cut -d "." -f 5 | cut -d "_" -f 2-);
|
||||||
other=`echo $filename | cut -d "." -f 5 | cut -d "_" -f 2-`; # backup date and time, plus sequence
|
|
||||||
# override file port over given port if it differs and is valid
|
# override file port over given port if it differs and is valid
|
||||||
if [ -z $_port ] && [ "$__port" != $_port ] && [[ $__port =~ $PORT_REGEX ]] ; then
|
if [ -z "$_port" ] && [ "$__port" != "$_port" ] && [[ "$__port" =~ $PORT_REGEX ]] ; then
|
||||||
_port=$__port;
|
_port=$__port;
|
||||||
port='-p '$_port;
|
PG_PARAM_PORT=("-p" "$_port");
|
||||||
fi;
|
fi;
|
||||||
if [ -z "$_host" ] && [ "$__host" != "local" ]; then
|
if [ -z "$_host" ] && [ "$__host" != "local" ]; then
|
||||||
_host=$__host;
|
_host=$__host;
|
||||||
host='-h '$_host;
|
PG_PARAM_HOST=("-h" "${_host}");
|
||||||
fi;
|
fi;
|
||||||
# override encoding (dangerous)
|
# override encoding (dangerous)
|
||||||
# check if we have a master override
|
# check if we have a master override
|
||||||
@@ -374,19 +385,19 @@ for file in $DUMP_FOLDER/*.sql; do
|
|||||||
fi;
|
fi;
|
||||||
# if no override encoding set first from file, then from global
|
# if no override encoding set first from file, then from global
|
||||||
if [ ! "$set_encoding" ]; then
|
if [ ! "$set_encoding" ]; then
|
||||||
if [ ! -z "$__encoding" ]; then
|
if [ -n "$__encoding" ]; then
|
||||||
set_encoding=$__encoding;
|
set_encoding=$__encoding;
|
||||||
else
|
else
|
||||||
set_encoding=$_encoding;
|
set_encoding=$_encoding;
|
||||||
fi;
|
fi;
|
||||||
fi;
|
fi;
|
||||||
# create the path to the DB from the DB version in the backup file
|
# create the path to the DB from the DB version in the backup file
|
||||||
if [ ! -z "$version" ]; then
|
if [ -n "$version" ]; then
|
||||||
DBPATH_VERSION_LOCAL=$version'/';
|
PG_PATH_VERSION_LOCAL="${version}/";
|
||||||
else
|
else
|
||||||
DBPATH_VERSION_LOCAL=$DBPATH_VERSION;
|
PG_PATH_VERSION_LOCAL="${PG_PATH_VERSION}";
|
||||||
fi;
|
fi;
|
||||||
DBPATH=$DBPATH_BASE$DBPATH_VERSION_LOCAL$DBPATH_BIN;
|
PG_PATH="${PG_BASE_PATH}${PG_PATH_VERSION_LOCAL}${PG_PATH_BIN}";
|
||||||
# check this is skip or not
|
# check this is skip or not
|
||||||
exclude=0;
|
exclude=0;
|
||||||
for exclude_db in $EXCLUDE_LIST; do
|
for exclude_db in $EXCLUDE_LIST; do
|
||||||
@@ -397,61 +408,90 @@ for file in $DUMP_FOLDER/*.sql; do
|
|||||||
if [ $exclude -eq 0 ]; then
|
if [ $exclude -eq 0 ]; then
|
||||||
# create user if not exist yet
|
# create user if not exist yet
|
||||||
# check query for user
|
# check query for user
|
||||||
user_oid=`echo "SELECT oid FROM pg_roles WHERE rolname = '$owner';" | $PSQL -U postgres $host $port -A -F "," -t -q -X template1`;
|
# for all calls
|
||||||
if [ -z $user_oid ]; then
|
_PG_PARAMS_ALL=("-U" "postgres");
|
||||||
echo "+ Create USER '$owner' for DB '$database' [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
_PG_PARAMS_ALL+=("${PG_PARAM_HOST[@]}");
|
||||||
|
_PG_PARAMS_ALL+=("${PG_PARAM_PORT[@]}");
|
||||||
|
# for the call
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("-A" "-F" "," "-t" "-q" "-X" "-c" "SELECT oid FROM pg_roles WHERE rolname = '$owner';" "template1");
|
||||||
|
# user_oid=$(echo "SELECT oid FROM pg_roles WHERE rolname = '$owner';" | $PSQL -U postgres $host $port -A -F "," -t -q -X template1);
|
||||||
|
user_oid=$("$PG_PSQL" "${_PG_PARAMS[@]}");
|
||||||
|
if [ -z "$user_oid" ]; then
|
||||||
|
echo "+ Create USER '$owner' for DB '$database' [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("-D" "-R" "-S" "$owner");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$CREATEUSER -U postgres -D -R -S $host $port $owner;
|
# "${PG_PATH}${PG_CREATEUSER}" -U postgres -D -R -S $host $port $owner;
|
||||||
|
"${PG_PATH}${PG_CREATEUSER}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo "$CREATEUSER -U postgres -D -R -S $host $port $owner";
|
echo "${PG_PATH}${PG_CREATEUSER} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
fi;
|
fi;
|
||||||
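From here on every client call follows the same pattern: the shared connection options live in _PG_PARAMS_ALL, each call copies that array into _PG_PARAMS and appends its own flags, and the array is expanded with [@] when executing but with [*] when only printing the command in dry-run mode. A condensed sketch with placeholder values:
# sketch: shared options once, per-call options appended on a copy
DRY_RUN=1;
_PG_PARAMS_ALL=("-U" "postgres" "-h" "db.example.com" "-p" "5432");
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
_PG_PARAMS+=("-D" "-R" "-S" "some_owner");
if [ ${DRY_RUN} -eq 0 ]; then
    createuser "${_PG_PARAMS[@]}";        # [@] keeps every element a separate argument
else
    echo "createuser ${_PG_PARAMS[*]}";   # [*] joins the elements for display only
fi;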
# before importing the data, drop this database
|
# before importing the data, drop this database
|
||||||
echo "- Drop DB '$database' [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
echo "- Drop DB '$database' [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("$database");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$DBPATH$DROPDB -U postgres $host $port $database;
|
"${PG_PATH}${PG_DROPDB}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo "$DBPATH$DROPDB -U postgres $host $port $database";
|
echo "${PG_PATH}${PG_DROPDB} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
echo "+ Create DB '$database' with '$owner' [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
echo "+ Create DB '$database' with '$owner' [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("-O" "$owner" "-E" "$set_encoding" "-T" "$TEMPLATEDB" "$database");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$DBPATH$CREATEDB -U postgres -O $owner -E $set_encoding -T $TEMPLATEDB $host $port $database;
|
# "${PG_PATH}${PG_CREATEDB}" -U postgres -O $owner -E $set_encoding -T $TEMPLATEDB $host $port $database;
|
||||||
|
"${PG_PATH}${PG_CREATEDB}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo "$DBPATH$CREATEDB -U postgres -O $owner -E $set_encoding -T $TEMPLATEDB $host $port $database";
|
echo "${PG_PATH}${PG_CREATEDB} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
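The createdb call carries the owner, encoding and template recovered for this dump; in short (sketch with placeholder values):
# sketch: -O sets the owner, -E the encoding, -T the template database to clone from
createdb -U postgres -O some_owner -E UTF8 -T template0 some_database;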
if [ -f $DBPATH$CREATELANG ]; then
|
if [ -f "${PG_PATH}${PG_CREATELANG}" ]; then
|
||||||
echo "+ Create plpgsql lang in DB '$database' [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
echo "+ Create plpgsql lang in DB '$database' [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("plpgsql" "$database");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$DBPATH$CREATELANG -U postgres plpgsql $host $port $database;
|
# "${PG_PATH}${PG_CREATELANG}" -U postgres plpgsql $host $port $database;
|
||||||
|
"${PG_PATH}${PG_CREATELANG}" "${_PG_PARAMS[@]}";
|
||||||
else
|
else
|
||||||
echo "$DBPATH$CREATELANG -U postgres plpgsql $host $port $database";
|
echo "${PG_PATH}${PG_CREATELANG} ${_PG_PARAMS[*]}";
|
||||||
fi;
|
fi;
|
||||||
fi;
|
fi;
|
||||||
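The file test around createlang is what keeps this working across server versions: the binary only ships with PostgreSQL 9.x and older, and the script simply skips the call when it is absent. A sketch of an equivalent guard; the else branch is illustrative only and not part of the original script:
# sketch: createlang was removed in PostgreSQL 10; newer servers use CREATE EXTENSION
PG_PATH="/usr/lib/postgresql/13/bin/";    # assumed path
if [ -f "${PG_PATH}createlang" ]; then
    "${PG_PATH}createlang" -U postgres plpgsql some_database;
else
    psql -U postgres -c "CREATE EXTENSION IF NOT EXISTS plpgsql;" some_database;
fi;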
echo "% Restore data from '$filename' to DB '$database' using $MAX_JOBS jobs [$_host:$_port] @ `date +"%F %T"`" | $LOGFILE;
|
echo "% Restore data from '$filename' to DB '$database' using $MAX_JOBS jobs [$_host:$_port] @ $(date +"%F %T")" | $LOGFILE;
|
||||||
|
_PG_PARAMS=("${_PG_PARAMS_ALL[@]}");
|
||||||
|
_PG_PARAMS+=("-d" "$database" "-F" "c" "-v" "-c" "-j" "$MAX_JOBS" "$file");
|
||||||
if [ ${DRY_RUN} -eq 0 ]; then
|
if [ ${DRY_RUN} -eq 0 ]; then
|
||||||
$DBPATH$PGRESTORE -U postgres -d $database -F c -v -c -j $MAX_JOBS $host $port $file 2>$LOGS'/errors.'$database'.'$(date +"%Y%m%d_%H%M%S".log);
|
# "${PG_PATH}${PG_RESTORE}" -U postgres -d $database -F c -v -c -j $MAX_JOBS $host $port $file 2>"$LOGS/errors.${database}.$(date +"%Y%m%d_%H%M%S").log";
|
||||||
|
"${PG_PATH}${PG_RESTORE}" "${_PG_PARAMS[@]}" 2>"$LOGS/errors.${database}.$(date +"%Y%m%d_%H%M%S").log";
|
||||||
else
|
else
|
||||||
echo "$DBPATH$PGRESTORE -U postgres -d $database -F c -v -c -j $MAX_JOBS $host $port $file 2>$LOGS'/errors.'$database'.'$(date +"%Y%m%d_%H%M%S".log)";
|
echo "${PG_PATH}${PG_RESTORE} ${_PG_PARAMS[*]} 2>${LOGS}/errors.${database}.$(date +"%Y%m%d_%H%M%S").log";
|
||||||
fi;
|
fi;
|
||||||
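The restore itself runs pg_restore against a custom-format dump, with verbose output redirected to a per-database error log. A sketch with placeholder paths:
# sketch: -d target DB, -F c custom format, -c drop objects before recreating them,
# -j number of parallel jobs, -v verbose; stderr goes to a timestamped log file
pg_restore -U postgres -d some_database -F c -v -c -j 4 \
    "/backup/some_database.sql" 2>"/var/log/pg_restore/errors.some_database.$(date +"%Y%m%d_%H%M%S").log";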
# BUG FIX FOR POSTGRESQL 9.6.2 db_dump
|
# BUG FIX FOR POSTGRESQL 9.6.2 db_dump
|
||||||
# it does not dump the default public ACL so the owner of the DB cannot access the data, check if the ACL dump is missing and do a basic restore
|
# it does not dump the default public ACL so the owner of the DB cannot access the data,
|
||||||
if [ -z "$($DBPATH$PGRESTORE -l $file | grep -- "ACL - public postgres")" ]; then
|
# check if the ACL dump is missing and do a basic restore
|
||||||
echo "? Fixing missing basic public schema ACLs from DB $database [$_host:$_port] @ `date +"%F %T"`";
|
if ! "$("${PG_PATH}${PG_RESTORE}" -l "$file" | grep -q -- "ACL - public postgres")"; then
|
||||||
|
echo "? Fixing missing basic public schema ACLs from DB $database [$_host:$_port] @ $(date +"%F %T")";
|
||||||
|
# echo "GRANT USAGE ON SCHEMA public TO public;" | "${PG_PATH}${PG_PSQL}" -U postgres -Atq $host $port $database;
|
||||||
|
# echo "GRANT CREATE ON SCHEMA public TO public;" | "${PG_PATH}${PG_PSQL}" -U postgres -Atq $host $port $database;
|
||||||
# grant usage on schema public to public;
|
# grant usage on schema public to public;
|
||||||
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
|
_PG_PARAMS+=("-Atq" "-c" "GRANT USAGE ON SCHEMA public TO public;" "${database}");
|
||||||
|
"${PG_PSQL}" "${_PG_PARAMS[@]}";
|
||||||
# grant create on schema public to public;
|
# grant create on schema public to public;
|
||||||
echo "GRANT USAGE ON SCHEMA public TO public;" | $DBPATH$PSQL -U postgres -Atq $host $port $database;
|
_PG_PARAMS=("${PG_PARAMS[@]}");
|
||||||
echo "GRANT CREATE ON SCHEMA public TO public;" | $DBPATH$PSQL -U postgres -Atq $host $port $database;
|
_PG_PARAMS+=("-Atq" "-c" "GRANT CREATE ON SCHEMA public TO public;" "${database}");
|
||||||
|
"${PG_PSQL}" "${_PG_PARAMS[@]}";
|
||||||
fi;
|
fi;
|
||||||
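The 9.6.2 workaround inspects the dump's table of contents and, when the default public schema ACL entry the script greps for is missing, reissues the two standard grants by hand. A sketch with placeholder names:
# sketch: pg_restore -l prints the TOC without restoring anything
if ! pg_restore -l "/backup/some_database.sql" | grep -q -- "ACL - public postgres"; then
    psql -U postgres -Atq -c "GRANT USAGE ON SCHEMA public TO public;" some_database;
    psql -U postgres -Atq -c "GRANT CREATE ON SCHEMA public TO public;" some_database;
fi;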
echo "$ Restore of data '$filename' for DB '$database' [$_host:$_port] finished" | $LOGFILE;
|
echo "$ Restore of data '$filename' for DB '$database' [$_host:$_port] finished" | $LOGFILE;
|
||||||
DURATION=$[ `date +'%s'`-$START ];
|
DURATION=$(($(date "+%s")-START));
|
||||||
echo "* Start at $start_time and end at `date +"%F %T"` and ran for $(convert_time ${DURATION}) seconds" | $LOGFILE;
|
echo "* Start at $start_time and end at $(date +"%F %T") and ran for $(convert_time ${DURATION}) seconds" | $LOGFILE;
|
||||||
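$[ ... ] is long-deprecated shell arithmetic; the replacement computes the elapsed time with $(( )) from epoch seconds. A standalone sketch (convert_time is the script's own formatter and is not reproduced here):
# sketch: measure elapsed wall-clock seconds
START=$(date "+%s");
sleep 2;
DURATION=$(( $(date "+%s") - START ));
echo "ran for ${DURATION} seconds";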
else
|
else
|
||||||
DURATION=0;
|
DURATION=0;
|
||||||
echo "# Skipped DB '$database'" | $LOGFILE;
|
echo "# Skipped DB '$database'" | $LOGFILE;
|
||||||
fi;
|
fi;
|
||||||
printf "=[$pos/$db_count]=END===[%s]========================================================>\n" "$(convert_time ${DURATION})" | $LOGFILE;
|
printf "=[$pos/$db_count]=END===[%s]========================================================>\n" "$(convert_time ${DURATION})" | $LOGFILE;
|
||||||
pos=$[ $pos+1 ];
|
pos=$((pos+1));
|
||||||
done;
|
done;
|
||||||
DURATION=$[ `date +'%s'`-$MASTERSTART ];
|
DURATION=$(($(date "+%s")-MASTERSTART));
|
||||||
echo "" | $LOGFILE;
|
echo "" | $LOGFILE;
|
||||||
echo "= Start at $master_start_time and end at `date +"%F %T"` and ran for $(convert_time ${DURATION}) seconds. Imported $db_count databases." | $LOGFILE;
|
echo "= Start at $master_start_time and end at $(date +"%F %T") and ran for $(convert_time ${DURATION}) seconds. Imported $db_count databases." | $LOGFILE;
|
||||||