Merge remote-tracking branch 'upstream/master' into logging

palto42 2019-11-01 19:44:04 +01:00
commit b121290c0f
26 changed files with 1015 additions and 259 deletions

NEWS
View file

@ -1,4 +1,16 @@
-1.4.1.dev0
+1.4.3
* Monitor backups with Cronitor hook integration. See the documentation for more information:
https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook
1.4.2
* Extract files to a particular directory via "borgmatic extract --destination" flag.
* Rename "borgmatic extract --restore-path" flag to "--path" to reduce confusion with the separate
"borgmatic restore" action. Any uses of "--restore-path" will continue working.
1.4.1
* #229: Restore backed up PostgreSQL databases via "borgmatic restore" action. See the
documentation for more information:
https://torsion.org/borgmatic/docs/how-to/backup-your-databases/
* Documentation on how to develop borgmatic's documentation:
  https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/#documentation-development

View file

@ -72,7 +72,7 @@ href="https://asciinema.org/a/203761" target="_blank">screencast</a>.
* [Deal with very large backups](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
-* [Restore a backup](https://torsion.org/borgmatic/docs/how-to/restore-a-backup/)
+* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Upgrade borgmatic](https://torsion.org/borgmatic/docs/how-to/upgrade/)

View file

@ -1,4 +1,5 @@
import logging
import os

from borgmatic.execute import execute_command, execute_command_without_capture
@ -47,24 +48,26 @@ def extract_last_archive_dry_run(repository, lock_wait=None, local_path='borg',
        )
    )

-    execute_command(full_extract_command)
+    execute_command(full_extract_command, working_directory=None, error_on_warnings=True)
def extract_archive(
    dry_run,
    repository,
    archive,
-    restore_paths,
+    paths,
    location_config,
    storage_config,
    local_path='borg',
    remote_path=None,
+    destination_path=None,
    progress=False,
):
    '''
    Given a dry-run flag, a local or remote repository path, an archive name, zero or more paths to
-    restore from the archive, and location/storage configuration dicts, extract the archive into the
-    current directory.
+    restore from the archive, location/storage configuration dicts, optional local and remote Borg
+    paths, and an optional destination path to extract to, extract the archive into the current
+    directory.
    '''
    umask = storage_config.get('umask', None)
    lock_wait = storage_config.get('lock_wait', None)
@ -79,14 +82,18 @@ def extract_archive(
        + (('--debug', '--list', '--show-rc') if logger.isEnabledFor(logging.DEBUG) else ())
        + (('--dry-run',) if dry_run else ())
        + (('--progress',) if progress else ())
-        + ('::'.join((repository, archive)),)
-        + (tuple(restore_paths) if restore_paths else ())
+        + ('::'.join((os.path.abspath(repository), archive)),)
+        + (tuple(paths) if paths else ())
    )

    # The progress output isn't compatible with captured and logged output, as progress messes with
    # the terminal directly.
    if progress:
-        execute_command_without_capture(full_command)
+        execute_command_without_capture(
+            full_command, working_directory=destination_path, error_on_warnings=True
+        )
        return

-    execute_command(full_command)
+    # Error on warnings, as Borg only gives a warning if the restore paths don't exist in the
+    # archive!
+    execute_command(full_command, working_directory=destination_path, error_on_warnings=True)
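For orientation, a rough sketch of how the reworked function might be called with the new parameters; the repository, archive, and destination values here are made up:

```python
# Hypothetical values throughout; the config dicts are left empty for brevity.
extract_archive(
    dry_run=False,
    repository='repo.borg',
    archive='host-2019-01-02T04:06:07.080910',
    paths=['etc/passwd'],
    location_config={},
    storage_config={},
    destination_path='/tmp/restore',  # extract here instead of the current directory
    progress=False,
)
```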

View file

@ -9,6 +9,7 @@ SUBPARSER_ALIASES = {
    'create': ['--create', '-C'],
    'check': ['--check', '-k'],
    'extract': ['--extract', '-x'],
    'restore': ['--restore', '-r'],
    'list': ['--list', '-l'],
    'info': ['--info', '-i'],
}
@ -269,7 +270,7 @@ def parse_arguments(*unparsed_arguments):
extract_parser = subparsers.add_parser(
    'extract',
    aliases=SUBPARSER_ALIASES['extract'],
-    help='Extract a named archive to the current directory',
+    help='Extract files from a named archive to the current directory',
    description='Extract a named archive to the current directory',
    add_help=False,
)
@ -278,12 +279,20 @@ def parse_arguments(*unparsed_arguments):
    '--repository',
    help='Path of repository to extract, defaults to the configured repository if there is only one',
)
-extract_group.add_argument('--archive', help='Name of archive to operate on', required=True)
+extract_group.add_argument('--archive', help='Name of archive to extract', required=True)
extract_group.add_argument(
    '--path',
    '--restore-path',
    metavar='PATH',
    nargs='+',
-    dest='restore_paths',
-    help='Paths to restore from archive, defaults to the entire archive',
+    dest='paths',
+    help='Paths to extract from archive, defaults to the entire archive',
)
extract_group.add_argument(
'--destination',
metavar='PATH',
dest='destination',
help='Directory to extract files into, defaults to the current directory',
) )
extract_group.add_argument( extract_group.add_argument(
'--progress', '--progress',
@ -296,6 +305,37 @@ def parse_arguments(*unparsed_arguments):
'-h', '--help', action='help', help='Show this help message and exit' '-h', '--help', action='help', help='Show this help message and exit'
) )
restore_parser = subparsers.add_parser(
'restore',
aliases=SUBPARSER_ALIASES['restore'],
help='Restore database dumps from a named archive',
description='Restore database dumps from a named archive. (To extract files instead, use "borgmatic extract".)',
add_help=False,
)
restore_group = restore_parser.add_argument_group('restore arguments')
restore_group.add_argument(
'--repository',
help='Path of repository to restore from, defaults to the configured repository if there is only one',
)
restore_group.add_argument('--archive', help='Name of archive to restore from', required=True)
restore_group.add_argument(
'--database',
metavar='NAME',
nargs='+',
dest='databases',
help='Names of databases to restore from archive, defaults to all databases. Note that any databases to restore must be defined in borgmatic\'s configuration',
)
restore_group.add_argument(
'--progress',
dest='progress',
default=False,
action='store_true',
help='Display progress for each database dump file as it is extracted from archive',
)
restore_group.add_argument(
'-h', '--help', action='help', help='Show this help message and exit'
)
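As a sketch of how the new restore subparser is exercised (the archive and database names are invented; the call pattern mirrors the unit tests later in this commit):

```python
# parse_arguments() returns a dict of argparse namespaces keyed by action name.
arguments = parse_arguments(
    'restore', '--archive', 'host-2019-01-02T04:06:07.080910', '--database', 'users'
)
assert arguments['restore'].archive == 'host-2019-01-02T04:06:07.080910'
assert arguments['restore'].databases == ['users']
```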
list_parser = subparsers.add_parser( list_parser = subparsers.add_parser(
'list', 'list',
aliases=SUBPARSER_ALIASES['list'], aliases=SUBPARSER_ALIASES['list'],

View file

@ -18,7 +18,7 @@ from borgmatic.borg import list as borg_list
from borgmatic.borg import prune as borg_prune
from borgmatic.commands.arguments import parse_arguments
from borgmatic.config import checks, collect, convert, validate
-from borgmatic.hooks import command, healthchecks, postgresql
+from borgmatic.hooks import command, cronitor, healthchecks, postgresql
from borgmatic.logger import configure_logging, should_do_markup
from borgmatic.signals import configure_signals
from borgmatic.verbosity import verbosity_to_log_level
@ -56,6 +56,9 @@ def run_configuration(config_filename, config, arguments):
        healthchecks.ping_healthchecks(
            hooks.get('healthchecks'), config_filename, global_arguments.dry_run, 'start'
        )
        cronitor.ping_cronitor(
            hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'run'
        )
        command.execute_hook(
            hooks.get('before_backup'),
            hooks.get('umask'),
@ -81,11 +84,12 @@ def run_configuration(config_filename, config, arguments):
                storage=storage,
                retention=retention,
                consistency=consistency,
                hooks=hooks,
                local_path=local_path,
                remote_path=remote_path,
                repository_path=repository_path,
            )
-        except (OSError, CalledProcessError) as error:
+        except (OSError, CalledProcessError, ValueError) as error:
            encountered_error = error
            error_repository = repository_path
            yield from make_error_log_records(
@ -107,6 +111,9 @@ def run_configuration(config_filename, config, arguments):
            healthchecks.ping_healthchecks(
                hooks.get('healthchecks'), config_filename, global_arguments.dry_run
            )
            cronitor.ping_cronitor(
                hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'complete'
            )
        except (OSError, CalledProcessError) as error:
            encountered_error = error
            yield from make_error_log_records(
@ -128,6 +135,9 @@ def run_configuration(config_filename, config, arguments):
            healthchecks.ping_healthchecks(
                hooks.get('healthchecks'), config_filename, global_arguments.dry_run, 'fail'
            )
            cronitor.ping_cronitor(
                hooks.get('cronitor'), config_filename, global_arguments.dry_run, 'fail'
            )
        except (OSError, CalledProcessError) as error:
            yield from make_error_log_records(
                '{}: Error running on-error hook'.format(config_filename), error
@ -141,6 +151,7 @@ def run_actions(
    storage,
    retention,
    consistency,
    hooks,
    local_path,
    remote_path,
    repository_path
@ -151,6 +162,9 @@ def run_actions(
    from the command-line arguments on the given repository.
    Yield JSON output strings from executing any actions that produce JSON.

    Raise OSError or subprocess.CalledProcessError if an error occurs running a command for an
    action. Raise ValueError if the arguments or configuration passed to action are invalid.
    '''
    repository = os.path.expanduser(repository_path)
    global_arguments = arguments['global']
@ -210,13 +224,52 @@ def run_actions(
                global_arguments.dry_run,
                repository,
                arguments['extract'].archive,
-                arguments['extract'].restore_paths,
+                arguments['extract'].paths,
                location,
                storage,
                local_path=local_path,
                remote_path=remote_path,
                destination_path=arguments['extract'].destination,
                progress=arguments['extract'].progress,
            )
if 'restore' in arguments:
if arguments['restore'].repository is None or repository == arguments['restore'].repository:
logger.info(
'{}: Restoring databases from archive {}'.format(
repository, arguments['restore'].archive
)
)
restore_names = arguments['restore'].databases or []
if 'all' in restore_names:
restore_names = []
# Extract dumps for the named databases from the archive.
dump_patterns = postgresql.make_database_dump_patterns(restore_names)
borg_extract.extract_archive(
global_arguments.dry_run,
repository,
arguments['restore'].archive,
postgresql.convert_glob_patterns_to_borg_patterns(dump_patterns),
location,
storage,
local_path=local_path,
remote_path=remote_path,
destination_path='/',
progress=arguments['restore'].progress,
)
# Map the restore names to the corresponding database configurations.
databases = list(
postgresql.get_database_configurations(
hooks.get('postgresql_databases'),
restore_names or postgresql.get_database_names_from_dumps(dump_patterns),
)
)
# Finally, restore the databases and cleanup the dumps.
postgresql.restore_database_dumps(databases, repository, global_arguments.dry_run)
postgresql.remove_database_dumps(databases, repository, global_arguments.dry_run)
    if 'list' in arguments:
        if arguments['list'].repository is None or repository == arguments['list'].repository:
            logger.info('{}: Listing archives'.format(repository))
@ -295,9 +348,10 @@ def make_error_log_records(message, error=None):
        yield logging.makeLogRecord(
            dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=message)
        )
-        yield logging.makeLogRecord(
-            dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output)
-        )
+        if error.output:
+            yield logging.makeLogRecord(
+                dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error.output)
+            )
        yield logging.makeLogRecord(dict(levelno=logging.CRITICAL, levelname='CRITICAL', msg=error))
    except (ValueError, OSError) as error:
        yield logging.makeLogRecord(

View file

@ -430,6 +430,13 @@ map:
                Create an account at https://healthchecks.io if you'd like to use this service.
            example:
                https://hc-ping.com/your-uuid-here
cronitor:
type: str
desc: |
Cronitor ping URL to notify when a backup begins, ends, or errors. Create an
account at https://cronitor.io if you'd like to use this service.
example:
https://cronitor.link/d3x0c1
        before_everything:
            seq:
                - type: str

View file

@ -9,17 +9,28 @@ ERROR_OUTPUT_MAX_LINE_COUNT = 25
BORG_ERROR_EXIT_CODE = 2


-def borg_command(full_command):
-    '''
-    Return True if this is a Borg command, or False if it's some other command.
-    '''
-    return 'borg' in full_command[0]
+def exit_code_indicates_error(command, exit_code, error_on_warnings=False):
+    '''
+    Return True if the given exit code from running the command corresponds to an error.
+    '''
+    # If we're running something other than Borg, treat all non-zero exit codes as errors.
+    if 'borg' in command[0] and not error_on_warnings:
+        return bool(exit_code >= BORG_ERROR_EXIT_CODE)
+
+    return bool(exit_code != 0)


-def execute_and_log_output(full_command, output_log_level, shell, environment):
+def execute_and_log_output(
+    full_command, output_log_level, shell, environment, working_directory, error_on_warnings
+):
    last_lines = []
    process = subprocess.Popen(
-        full_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=shell, env=environment
+        full_command,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.STDOUT,
+        shell=shell,
+        env=environment,
+        cwd=working_directory,
    )

    while process.poll() is None:
@ -41,13 +52,7 @@ def execute_and_log_output(full_command, output_log_level, shell, environment):
    exit_code = process.poll()

-    # If we're running something other than Borg, treat all non-zero exit codes as errors.
-    if borg_command(full_command):
-        error = bool(exit_code >= BORG_ERROR_EXIT_CODE)
-    else:
-        error = bool(exit_code != 0)
-
-    if error:
+    if exit_code_indicates_error(full_command, exit_code, error_on_warnings):
        # If an error occurs, include its output in the raised exception so that we don't
        # inadvertently hide error output.
        if len(last_lines) == ERROR_OUTPUT_MAX_LINE_COUNT:
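To make the new classification concrete, a few illustrative calls (behavior inferred from the function above, with BORG_ERROR_EXIT_CODE = 2):

```python
exit_code_indicates_error(('borg', 'create'), 1)                          # False: a Borg warning is tolerated
exit_code_indicates_error(('borg', 'create'), 1, error_on_warnings=True)  # True: warnings promoted to errors
exit_code_indicates_error(('borg', 'create'), 2)                          # True: a real Borg error
exit_code_indicates_error(('pg_dump',), 1)                                # True: non-Borg commands error on any non-zero exit
```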
@ -59,13 +64,19 @@ def execute_and_log_output(full_command, output_log_level, shell, environment):
def execute_command(
-    full_command, output_log_level=logging.INFO, shell=False, extra_environment=None
+    full_command,
+    output_log_level=logging.INFO,
+    shell=False,
+    extra_environment=None,
+    working_directory=None,
+    error_on_warnings=False,
):
    '''
    Execute the given command (a sequence of command/argument strings) and log its output at the
    given log level. If output log level is None, instead capture and return the output. If
    shell is True, execute the command within a shell. If an extra environment dict is given, then
-    use it to augment the current environment, and pass the result into the command.
+    use it to augment the current environment, and pass the result into the command. If a working
+    directory is given, use that as the present working directory when running the command.

    Raise subprocesses.CalledProcessError if an error occurs while running the command.
    '''
@ -73,22 +84,34 @@ def execute_command(
    environment = {**os.environ, **extra_environment} if extra_environment else None

    if output_log_level is None:
-        output = subprocess.check_output(full_command, shell=shell, env=environment)
+        output = subprocess.check_output(
+            full_command, shell=shell, env=environment, cwd=working_directory
+        )
        return output.decode() if output is not None else None
    else:
-        execute_and_log_output(full_command, output_log_level, shell=shell, environment=environment)
+        execute_and_log_output(
+            full_command,
+            output_log_level,
+            shell=shell,
+            environment=environment,
+            working_directory=working_directory,
+            error_on_warnings=error_on_warnings,
+        )


-def execute_command_without_capture(full_command):
+def execute_command_without_capture(full_command, working_directory=None, error_on_warnings=False):
    '''
    Execute the given command (a sequence of command/argument strings), but don't capture or log its
    output in any way. This is necessary for commands that monkey with the terminal (e.g. progress
    display) or provide interactive prompts.

    If a working directory is given, use that as the present working directory when running the
    command.
    '''
    logger.debug(' '.join(full_command))

    try:
-        subprocess.check_call(full_command)
+        subprocess.check_call(full_command, cwd=working_directory)
    except subprocess.CalledProcessError as error:
-        if error.returncode >= BORG_ERROR_EXIT_CODE:
+        if exit_code_indicates_error(full_command, error.returncode, error_on_warnings):
            raise
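A hedged usage sketch of the extended execute_command signature; the command and directory are placeholders:

```python
execute_command(
    ('borg', 'extract', 'repo.borg::host-2019-01-02T04:06:07.080910'),
    working_directory='/tmp/restore',   # run Borg from the extraction destination
    error_on_warnings=True,             # treat Borg warnings (exit code 1) as failures
)
```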

View file

@ -0,0 +1,24 @@
import logging
import requests
logger = logging.getLogger(__name__)
def ping_cronitor(ping_url, config_filename, dry_run, append):
'''
Ping the given Cronitor URL, appending the append string. Use the given configuration filename
in any log entries. If this is a dry run, then don't actually ping anything.
'''
if not ping_url:
logger.debug('{}: No Cronitor hook set'.format(config_filename))
return
dry_run_label = ' (dry run; not actually pinging)' if dry_run else ''
ping_url = '{}/{}'.format(ping_url, append)
logger.info('{}: Pinging Cronitor {}{}'.format(config_filename, append, dry_run_label))
logger.debug('{}: Using Cronitor ping URL {}'.format(config_filename, ping_url))
logging.getLogger('urllib3').setLevel(logging.ERROR)
requests.get(ping_url)
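For reference, this is roughly how the commands module above invokes the hook; the ping URL is the same example used in the configuration schema:

```python
# 'run' is appended to the ping URL, yielding e.g. https://cronitor.link/d3x0c1/run
ping_cronitor('https://cronitor.link/d3x0c1', 'config.yaml', False, 'run')
```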

View file

@ -7,12 +7,12 @@ logger = logging.getLogger(__name__)
def ping_healthchecks(ping_url_or_uuid, config_filename, dry_run, append=None): def ping_healthchecks(ping_url_or_uuid, config_filename, dry_run, append=None):
''' '''
Ping the given healthchecks.io URL or UUID, appending the append string if any. Use the given Ping the given Healthchecks URL or UUID, appending the append string if any. Use the given
configuration filename in any log entries. If this is a dry run, then don't actually ping configuration filename in any log entries. If this is a dry run, then don't actually ping
anything. anything.
''' '''
if not ping_url_or_uuid: if not ping_url_or_uuid:
logger.debug('{}: No healthchecks hook set'.format(config_filename)) logger.debug('{}: No Healthchecks hook set'.format(config_filename))
return return
ping_url = ( ping_url = (
@ -26,11 +26,11 @@ def ping_healthchecks(ping_url_or_uuid, config_filename, dry_run, append=None):
ping_url = '{}/{}'.format(ping_url, append) ping_url = '{}/{}'.format(ping_url, append)
logger.info( logger.info(
'{}: Pinging healthchecks.io{}{}'.format( '{}: Pinging Healthchecks{}{}'.format(
config_filename, ' ' + append if append else '', dry_run_label config_filename, ' ' + append if append else '', dry_run_label
) )
) )
logger.debug('{}: Using healthchecks.io ping URL {}'.format(config_filename, ping_url)) logger.debug('{}: Using Healthchecks ping URL {}'.format(config_filename, ping_url))
logging.getLogger('urllib3').setLevel(logging.ERROR) logging.getLogger('urllib3').setLevel(logging.ERROR)
requests.get(ping_url) requests.get(ping_url)

View file

@ -1,3 +1,4 @@
import glob
import logging
import os
@ -7,32 +8,39 @@ DUMP_PATH = '~/.borgmatic/postgresql_databases'
logger = logging.getLogger(__name__)
-def dump_databases(databases, config_filename, dry_run):
+def make_database_dump_filename(name, hostname=None):
+    '''
+    Based on the given database name and hostname, return a filename to use for the database dump.
+    Raise ValueError if the database name is invalid.
+    '''
+    if os.path.sep in name:
+        raise ValueError('Invalid database name {}'.format(name))
+
+    return os.path.join(os.path.expanduser(DUMP_PATH), hostname or 'localhost', name)
+
+
+def dump_databases(databases, log_prefix, dry_run):
    '''
    Dump the given PostgreSQL databases to disk. The databases are supplied as a sequence of dicts,
-    one dict describing each database as per the configuration schema. Use the given configuration
-    filename in any log entries. If this is a dry run, then don't actually dump anything.
+    one dict describing each database as per the configuration schema. Use the given log prefix in
+    any log entries. If this is a dry run, then don't actually dump anything.
    '''
    if not databases:
-        logger.debug('{}: No PostgreSQL databases configured'.format(config_filename))
+        logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
        return

    dry_run_label = ' (dry run; not actually dumping anything)' if dry_run else ''
-    logger.info('{}: Dumping PostgreSQL databases{}'.format(config_filename, dry_run_label))
+    logger.info('{}: Dumping PostgreSQL databases{}'.format(log_prefix, dry_run_label))

    for database in databases:
if os.path.sep in database['name']:
raise ValueError('Invalid database name {}'.format(database['name']))
dump_path = os.path.join(
os.path.expanduser(DUMP_PATH), database.get('hostname', 'localhost')
)
name = database['name'] name = database['name']
dump_filename = make_database_dump_filename(name, database.get('hostname'))
all_databases = bool(name == 'all') all_databases = bool(name == 'all')
command = ( command = (
('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean') ('pg_dumpall' if all_databases else 'pg_dump', '--no-password', '--clean')
+ ('--file', os.path.join(dump_path, name)) + ('--file', dump_filename)
+ (('--host', database['hostname']) if 'hostname' in database else ()) + (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ()) + (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ()) + (('--username', database['username']) if 'username' in database else ())
@ -42,47 +50,135 @@ def dump_databases(databases, config_filename, dry_run):
) )
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
logger.debug( logger.debug('{}: Dumping PostgreSQL database {}{}'.format(log_prefix, name, dry_run_label))
'{}: Dumping PostgreSQL database {}{}'.format(config_filename, name, dry_run_label)
)
if not dry_run: if not dry_run:
os.makedirs(dump_path, mode=0o700, exist_ok=True) os.makedirs(os.path.dirname(dump_filename), mode=0o700, exist_ok=True)
execute_command(command, extra_environment=extra_environment) execute_command(command, extra_environment=extra_environment)
def remove_database_dumps(databases, config_filename, dry_run): def remove_database_dumps(databases, log_prefix, dry_run):
''' '''
Remove the database dumps for the given databases. The databases are supplied as a sequence of Remove the database dumps for the given databases. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given dicts, one dict describing each database as per the configuration schema. Use the log prefix in
configuration filename in any log entries. If this is a dry run, then don't actually remove any log entries. If this is a dry run, then don't actually remove anything.
anything.
''' '''
if not databases: if not databases:
logger.debug('{}: No PostgreSQL databases configured'.format(config_filename)) logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
return return
dry_run_label = ' (dry run; not actually removing anything)' if dry_run else '' dry_run_label = ' (dry run; not actually removing anything)' if dry_run else ''
logger.info('{}: Removing PostgreSQL database dumps{}'.format(config_filename, dry_run_label)) logger.info('{}: Removing PostgreSQL database dumps{}'.format(log_prefix, dry_run_label))
for database in databases: for database in databases:
if os.path.sep in database['name']: dump_filename = make_database_dump_filename(database['name'], database.get('hostname'))
raise ValueError('Invalid database name {}'.format(database['name']))
name = database['name']
dump_path = os.path.join(
os.path.expanduser(DUMP_PATH), database.get('hostname', 'localhost')
)
dump_filename = os.path.join(dump_path, name)
logger.debug( logger.debug(
'{}: Remove PostgreSQL database dump {} from {}{}'.format( '{}: Removing PostgreSQL database dump {} from {}{}'.format(
config_filename, name, dump_filename, dry_run_label log_prefix, database['name'], dump_filename, dry_run_label
) )
) )
if dry_run: if dry_run:
continue continue
os.remove(dump_filename) os.remove(dump_filename)
dump_path = os.path.dirname(dump_filename)
        if len(os.listdir(dump_path)) == 0:
            os.rmdir(dump_path)
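A quick sketch of what the new make_database_dump_filename() helper at the top of this module produces; the paths assume the default DUMP_PATH and a root user:

```python
make_database_dump_filename('users', 'database.example.org')
# -> '/root/.borgmatic/postgresql_databases/database.example.org/users'
make_database_dump_filename('users')
# -> '/root/.borgmatic/postgresql_databases/localhost/users'
make_database_dump_filename('users/bad')  # raises ValueError: invalid database name
```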
def make_database_dump_patterns(names):
'''
Given a sequence of database names, return the corresponding glob patterns to match the database
dumps in an archive. An empty sequence of names indicates that the patterns should match all
dumps.
'''
return [make_database_dump_filename(name, hostname='*') for name in (names or ['*'])]
def convert_glob_patterns_to_borg_patterns(patterns):
'''
Convert a sequence of shell glob patterns like "/etc/*" to the corresponding Borg archive
patterns like "sh:etc/*".
'''
return ['sh:{}'.format(pattern.lstrip(os.path.sep)) for pattern in patterns]
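The conversion is easiest to see with the example from the docstring:

```python
convert_glob_patterns_to_borg_patterns(['/etc/*'])
# -> ['sh:etc/*']
```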
def get_database_names_from_dumps(patterns):
'''
Given a sequence of database dump patterns, find the corresponding database dumps on disk and
return the database names from their filenames.
'''
return [os.path.basename(dump_path) for pattern in patterns for dump_path in glob.glob(pattern)]
def get_database_configurations(databases, names):
'''
Given the full database configuration dicts as per the configuration schema, and a sequence of
database names, filter down and yield the configuration for just the named databases.
Additionally, if a database configuration is named "all", project out that configuration for
each named database.
Raise ValueError if one of the database names cannot be matched to a database in borgmatic's
database configuration.
'''
named_databases = {database['name']: database for database in databases}
for name in names:
database = named_databases.get(name)
if database:
yield database
continue
if 'all' in named_databases:
yield {**named_databases['all'], **{'name': name}}
continue
raise ValueError(
'Cannot restore database "{}", as it is not defined in borgmatic\'s configuration'.format(
name
)
)
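A sketch of the "all" projection described above, using a made-up configuration:

```python
list(get_database_configurations(
    [{'name': 'all', 'hostname': 'database.example.org'}],
    ['users', 'orders'],
))
# -> [{'name': 'users', 'hostname': 'database.example.org'},
#     {'name': 'orders', 'hostname': 'database.example.org'}]
```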
def restore_database_dumps(databases, log_prefix, dry_run):
'''
Restore the given PostgreSQL databases from disk. The databases are supplied as a sequence of
dicts, one dict describing each database as per the configuration schema. Use the given log
prefix in any log entries. If this is a dry run, then don't actually restore anything.
'''
if not databases:
logger.debug('{}: No PostgreSQL databases configured'.format(log_prefix))
return
dry_run_label = ' (dry run; not actually restoring anything)' if dry_run else ''
for database in databases:
dump_filename = make_database_dump_filename(database['name'], database.get('hostname'))
restore_command = (
('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ ('--dbname', database['name'])
+ (dump_filename,)
)
extra_environment = {'PGPASSWORD': database['password']} if 'password' in database else None
analyze_command = (
('psql', '--no-password', '--quiet')
+ (('--host', database['hostname']) if 'hostname' in database else ())
+ (('--port', str(database['port'])) if 'port' in database else ())
+ (('--username', database['username']) if 'username' in database else ())
+ ('--dbname', database['name'])
+ ('--command', 'ANALYZE')
)
logger.debug(
'{}: Restoring PostgreSQL database {}{}'.format(
log_prefix, database['name'], dry_run_label
)
)
if not dry_run:
execute_command(restore_command, extra_environment=extra_environment)
execute_command(analyze_command, extra_environment=extra_environment)
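Putting the pieces together: for a hypothetical configuration entry {'name': 'users', 'hostname': 'database.example.org'}, restore_database_dumps() builds roughly this pg_restore invocation (followed by an ANALYZE via psql):

```python
('pg_restore', '--no-password', '--clean', '--if-exists', '--exit-on-error',
 '--host', 'database.example.org',
 '--dbname', 'users',
 '/root/.borgmatic/postgresql_databases/database.example.org/users')
```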

View file

@ -3,7 +3,7 @@ FROM python:3.7.4-alpine3.10 as borgmatic
COPY . /app
RUN pip install --no-cache /app && generate-borgmatic-config && chmod +r /etc/borgmatic/config.yaml
RUN borgmatic --help > /command-line.txt \
-    && for action in init prune create check extract list info; do \
+    && for action in init prune create check extract restore list info; do \
    echo -e "\n--------------------------------------------------------------------------------\n" >> /command-line.txt \
    && borgmatic "$action" --help >> /command-line.txt; done

View file

@ -1,3 +1,4 @@
<header class="elv-layout elv-layout-full elv-header{% if headerClass %} {{ headerClass }}{% endif %}">
+    {% if page.url != '/' %}<h3><a href="https://torsion.org/borgmatic/">borgmatic</a></h3>{% endif %}
    <h1 class="elv-hed">{{ title | safe }}</h1>
</header>

View file

@ -49,6 +49,16 @@ hooks:
Note that you may need to use a `username` of the `postgres` superuser for
this to work.
### Configuration backups
An important note about this database configuration: You'll need the
configuration to be present in order for borgmatic to restore a database. So
to prepare for this situation, it's a good idea to include borgmatic's own
configuration files as part of your regular backups. That way, you can always
bring back any missing configuration files in order to restore a database.
## Supported databases

As of now, borgmatic only supports PostgreSQL databases directly. But see
@ -57,12 +67,89 @@ with other database systems. Also, please [file a
ticket](https://torsion.org/borgmatic/#issues) for additional database systems
that you'd like supported.

## Database restoration

-borgmatic does not yet perform integrated database restoration when you
-[restore a backup](http://localhost:8080/docs/how-to/restore-a-backup/), but
-that feature is coming in a future release. In the meantime, you can restore
-a database manually after restoring a dump file in the `~/.borgmatic` path.
+To restore a database dump from an archive, use the `borgmatic restore`
+action. But the first step is to figure out which archive to restore from. A
+good way to do that is to use the `list` action:
```bash
borgmatic list
```
(No borgmatic `list` action? Try the old-style `--list`, or upgrade
borgmatic!)
That should yield output looking something like:
```text
host-2019-01-01T04:05:06.070809 Tue, 2019-01-01 04:05:06 [...]
host-2019-01-02T04:06:07.080910 Wed, 2019-01-02 04:06:07 [...]
```
Assuming that you want to restore all database dumps from the archive with the
most up-to-date files and therefore the latest timestamp, run a command like:
```bash
borgmatic restore --archive host-2019-01-02T04:06:07.080910
```
(No borgmatic `restore` action? Upgrade borgmatic!)
The `--archive` value is the name of the archive to restore from. This
restores all databases dumps that borgmatic originally backed up to that
archive.
This is a destructive action! `borgmatic restore` replaces live databases by
restoring dumps from the selected archive. So be very careful when and where
you run it.
### Repository selection
If you have a single repository in your borgmatic configuration file(s), no
problem: the `restore` action figures out which repository to use.
But if you have multiple repositories configured, then you'll need to specify
the repository path containing the archive to restore. Here's an example:
```bash
borgmatic restore --repository repo.borg --archive host-2019-...
```
### Restore particular databases
If you've backed up multiple databases into an archive, and you'd only like to
restore one of them, use the `--database` flag to select one or more
databases. For instance:
```bash
borgmatic restore --archive host-2019-... --database users
```
### Limitations
There are a few important limitations with borgmatic's current database
restoration feature that you should know about:
1. You must restore as the same Unix user that created the archive containing
the database dump. That's because the user's home directory path is encoded
into the path of the database dump within the archive.
2. As mentioned above, borgmatic can only restore a database that's defined in
borgmatic's own configuration file. So include your configuration file in
backups to avoid getting caught without a way to restore a database.
3. borgmatic does not currently support backing up or restoring multiple
databases that share the exact same name on different hosts.
### Manual restoration
If you prefer to restore a database without the help of borgmatic, first
[extract](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/) an
archive containing a database dump, and then manually restore the dump file
found within the extracted `~/.borgmatic/` path (e.g. with `pg_restore`).
## Preparation and cleanup hooks ## Preparation and cleanup hooks
@ -73,9 +160,10 @@ These hooks allows you to trigger arbitrary commands or scripts before and
after backups. So if necessary, you can use these hooks to create database
dumps with any database system.

## Related documentation

* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
-* [Restore a backup](http://localhost:8080/docs/how-to/restore-a-backup/)
+* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)

View file

@ -0,0 +1,95 @@
---
title: How to extract a backup
---
## Extract
When the worst happens—or you want to test your backups—the first step is
to figure out which archive to extract. A good way to do that is to use the
`list` action:
```bash
borgmatic list
```
(No borgmatic `list` action? Try the old-style `--list`, or upgrade
borgmatic!)
That should yield output looking something like:
```text
host-2019-01-01T04:05:06.070809 Tue, 2019-01-01 04:05:06 [...]
host-2019-01-02T04:06:07.080910 Wed, 2019-01-02 04:06:07 [...]
```
Assuming that you want to extract the archive with the most up-to-date files
and therefore the latest timestamp, run a command like:
```bash
borgmatic extract --archive host-2019-01-02T04:06:07.080910
```
(No borgmatic `extract` action? Try the old-style `--extract`, or upgrade
borgmatic!)
The `--archive` value is the name of the archive to extract. This extracts the
entire contents of the archive to the current directory, so make sure you're
in the right place before running the command.
## Repository selection
If you have a single repository in your borgmatic configuration file(s), no
problem: the `extract` action figures out which repository to use.
But if you have multiple repositories configured, then you'll need to specify
the repository path containing the archive to extract. Here's an example:
```bash
borgmatic extract --repository repo.borg --archive host-2019-...
```
## Extract particular files
Sometimes, you want to extract a single deleted file, rather than extracting
everything from an archive. To do that, tack on one or more `--path` values.
For instance:
```bash
borgmatic extract --archive host-2019-... --path path/1 path/2
```
Note that the specified restore paths should not have a leading slash. Like a
whole-archive extract, this also extracts into the current directory. So for
example, if you happen to be in the directory `/var` and you run the `extract`
command above, borgmatic will extract `/var/path/1` and `/var/path/2`.
## Extract to a particular destination
By default, borgmatic extracts files into the current directory. To instead
extract files to a particular destination directory, use the `--destination`
flag:
```bash
borgmatic extract --archive host-2019-... --destination /tmp
```
When using the `--destination` flag, be careful not to overwrite your system's
files with extracted files unless that is your intent.
## Database restoration
The `borgmatic extract` command only extracts files. To restore a database,
please see the [documentation on database backups and
restores](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/).
borgmatic does not perform database restoration as part of `borgmatic extract`
so that you can extract files from your archive without impacting your live
databases.
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)
* [Backup your databases](https://torsion.org/borgmatic/docs/how-to/backup-your-databases/)

View file

@ -26,12 +26,15 @@ alert. But note that if borgmatic doesn't actually run, this alert won't fire.
See [error
hooks](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#error-hooks)
below for how to configure this.
-4. **borgmatic Healthchecks hook**: This feature integrates with the
-[Healthchecks](https://healthchecks.io/) service, and pings Healthchecks
-whenever borgmatic runs. That way, Healthchecks can alert you when something
-goes wrong or it doesn't hear from borgmatic for a configured interval. See
-[Healthchecks
-hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook)
-below for how to configure this.
+4. **borgmatic monitoring hooks**: This feature integrates with monitoring
+services like [Healthchecks](https://healthchecks.io/) and
+[Cronitor](https://cronitor.io), and pings these services whenever borgmatic
+runs. That way, you'll receive an alert when something goes wrong or the
+service doesn't hear from borgmatic for a configured interval. See
+[Healthchecks
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#healthchecks-hook)
+and [Cronitor
+hook](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#cronitor-hook)
+below for how to configure this.
3. **Third-party monitoring software**: You can use traditional monitoring
software to consume borgmatic JSON output and track when the last
@ -47,8 +50,8 @@ from borgmatic for a configured interval.
really want confidence that your backups are not only running but are
restorable as well, you can configure particular [consistency
checks](https://torsion.org/borgmatic/docs/how-to/deal-with-very-large-backups/#consistency-check-configuration)
-or even script full [restore
-tests](https://torsion.org/borgmatic/docs/how-to/restore-a-backup/).
+or even script full [extract
+tests](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/).
## Error hooks ## Error hooks
@ -115,6 +118,27 @@ mechanisms](https://healthchecks.io/#welcome-integrations) when backups fail
or it doesn't hear from borgmatic for a certain period of time. or it doesn't hear from borgmatic for a certain period of time.
## Cronitor hook
[Cronitor](https://cronitor.io/) provides "Cron monitoring and uptime healthchecks
for websites, services and APIs", and borgmatic has built-in
integration with it. Once you create a Cronitor account and cron job monitor on
their site, all you need to do is configure borgmatic with the unique "Ping
API URL" for your monitor. Here's an example:
```yaml
hooks:
cronitor: https://cronitor.link/d3x0c1
```
With this hook in place, borgmatic will ping your Cronitor monitor when a
backup begins, ends, or errors. Then you can configure Cronitor to notify you
by a [variety of
mechanisms](https://cronitor.io/docs/cron-job-notifications) when backups
fail or it doesn't hear from borgmatic for a certain period of time.
## Scripting borgmatic ## Scripting borgmatic
To consume the output of borgmatic in other software, you can include an To consume the output of borgmatic in other software, you can include an
@ -154,5 +178,5 @@ fancier with your archive listing. See `borg list --help` for more flags.
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Add preparation and cleanup steps to backups](https://torsion.org/borgmatic/docs/how-to/add-preparation-and-cleanup-steps-to-backups/)
-* [Restore a backup](https://torsion.org/borgmatic/docs/how-to/restore-a-backup/)
+* [Extract a backup](https://torsion.org/borgmatic/docs/how-to/extract-a-backup/)
* [Develop on borgmatic](https://torsion.org/borgmatic/docs/how-to/develop-on-borgmatic/)

View file

@ -1,71 +1,3 @@
----
-title: How to restore a backup
----
+<head>
+    <meta http-equiv='refresh' content='0; URL=https://torsion.org/borgmatic/docs/how-to/extract-a-backup/'>
+</head>
## Extract
When the worst happens—or you want to test your backups—the first step is
to figure out which archive to restore. A good way to do that is to use the
`list` action:
```bash
borgmatic list
```
(No borgmatic `list` action? Try the old-style `--list`, or upgrade
borgmatic!)
That should yield output looking something like:
```text
host-2019-01-01T04:05:06.070809 Tue, 2019-01-01 04:05:06 [...]
host-2019-01-02T04:06:07.080910 Wed, 2019-01-02 04:06:07 [...]
```
Assuming that you want to restore the archive with the most up-to-date files
and therefore the latest timestamp, run a command like:
```bash
borgmatic extract --archive host-2019-01-02T04:06:07.080910
```
(No borgmatic `extract` action? Try the old-style `--extract`, or upgrade
borgmatic!)
The `--archive` value is the name of the archive to restore. This extracts the
entire contents of the archive to the current directory, so make sure you're
in the right place before running the command.
## Repository selection
If you have a single repository in your borgmatic configuration file(s), no
problem: the `extract` action figures out which repository to use.
But if you have multiple repositories configured, then you'll need to specify
the repository path containing the archive to extract. Here's an example:
```bash
borgmatic extract --repository repo.borg --archive host-2019-...
```
## Restore particular files
Sometimes, you want to restore a single deleted file, rather than restoring
everything from an archive. To do that, tack on one or more `--restore-path`
values. For instance:
```bash
borgmatic extract --archive host-2019-... --restore-path path/1 path/2
```
Note that the specified restore paths should not have a leading slash. Like a
whole-archive restore, this also restores into the current directory. So for
example, if you happen to be in the directory `/var` and you run the `extract`
command above, borgmatic will restore `/var/path/1` and `/var/path/2`.
## Related documentation
* [Set up backups with borgmatic](https://torsion.org/borgmatic/docs/how-to/set-up-backups/)
* [Inspect your backups](https://torsion.org/borgmatic/docs/how-to/inspect-your-backups/)
* [Monitor your backups](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/)

View file

@ -1,6 +0,0 @@
#!/usr/bin/env sh
# Temporary work around for https://github.com/pypa/pip/issues/6434
python -m pip install --upgrade pip==19.1.1
python -m pip install --no-use-pep517 $*

View file

@ -6,7 +6,7 @@
set -e

-python -m pip install --upgrade pip==19.1.1
+python -m pip install --upgrade pip==19.3.1
pip install tox==3.14.0
tox
apk add --no-cache borgbackup

View file

@ -1,6 +1,6 @@
from setuptools import find_packages, setup

-VERSION = '1.4.1.dev0'
+VERSION = '1.4.3'

setup(

View file

@ -260,18 +260,18 @@ def test_parse_arguments_allows_repository_with_list():
module.parse_arguments('--config', 'myconfig', 'list', '--repository', 'test.borg') module.parse_arguments('--config', 'myconfig', 'list', '--repository', 'test.borg')
def test_parse_arguments_disallows_archive_without_extract_or_list(): def test_parse_arguments_disallows_archive_without_extract_restore_or_list():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
with pytest.raises(SystemExit): with pytest.raises(SystemExit):
module.parse_arguments('--config', 'myconfig', '--archive', 'test') module.parse_arguments('--config', 'myconfig', '--archive', 'test')
def test_parse_arguments_disallows_restore_paths_without_extract(): def test_parse_arguments_disallows_paths_without_extract():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
with pytest.raises(SystemExit): with pytest.raises(SystemExit):
module.parse_arguments('--config', 'myconfig', '--restore-path', 'test') module.parse_arguments('--config', 'myconfig', '--path', 'test')
def test_parse_arguments_allows_archive_with_extract(): def test_parse_arguments_allows_archive_with_extract():
@ -286,6 +286,18 @@ def test_parse_arguments_allows_archive_with_dashed_extract():
module.parse_arguments('--config', 'myconfig', '--extract', '--archive', 'test') module.parse_arguments('--config', 'myconfig', '--extract', '--archive', 'test')
def test_parse_arguments_allows_archive_with_restore():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
module.parse_arguments('--config', 'myconfig', 'restore', '--archive', 'test')
def test_parse_arguments_allows_archive_with_dashed_restore():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
module.parse_arguments('--config', 'myconfig', '--restore', '--archive', 'test')
def test_parse_arguments_allows_archive_with_list(): def test_parse_arguments_allows_archive_with_list():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
@ -299,6 +311,13 @@ def test_parse_arguments_requires_archive_with_extract():
module.parse_arguments('--config', 'myconfig', 'extract') module.parse_arguments('--config', 'myconfig', 'extract')
def test_parse_arguments_requires_archive_with_restore():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
with pytest.raises(SystemExit):
module.parse_arguments('--config', 'myconfig', 'restore')
def test_parse_arguments_allows_progress_before_create(): def test_parse_arguments_allows_progress_before_create():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
@ -317,6 +336,12 @@ def test_parse_arguments_allows_progress_and_extract():
module.parse_arguments('--progress', 'extract', '--archive', 'test', 'list') module.parse_arguments('--progress', 'extract', '--archive', 'test', 'list')
def test_parse_arguments_allows_progress_and_restore():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])
module.parse_arguments('--progress', 'restore', '--archive', 'test', 'list')
def test_parse_arguments_disallows_progress_without_create(): def test_parse_arguments_disallows_progress_without_create():
flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default']) flexmock(module.collect).should_receive('get_default_config_paths').and_return(['default'])

View file

@ -7,69 +7,60 @@ from flexmock import flexmock
from borgmatic import execute as module from borgmatic import execute as module
def test_borg_command_identifies_borg_command():
assert module.borg_command(['/usr/bin/borg1', 'info'])
def test_borg_command_does_not_identify_other_command():
assert not module.borg_command(['grep', 'stuff'])
def test_execute_and_log_output_logs_each_line_separately(): def test_execute_and_log_output_logs_each_line_separately():
flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once() flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'hi').once()
flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once() flexmock(module.logger).should_receive('log').with_args(logging.INFO, 'there').once()
flexmock(module).should_receive('borg_command').and_return(False) flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
module.execute_and_log_output( module.execute_and_log_output(
['echo', 'hi'], output_log_level=logging.INFO, shell=False, environment=None ['echo', 'hi'],
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
) )
module.execute_and_log_output( module.execute_and_log_output(
['echo', 'there'], output_log_level=logging.INFO, shell=False, environment=None ['echo', 'there'],
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
) )
def test_execute_and_log_output_with_borg_warning_does_not_raise(): def test_execute_and_log_output_includes_error_output_in_exception():
flexmock(module.logger).should_receive('log') flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('borg_command').and_return(True) flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
module.execute_and_log_output(
['false'], output_log_level=logging.INFO, shell=False, environment=None
)
def test_execute_and_log_output_includes_borg_error_output_in_exception():
flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('borg_command').and_return(True)
with pytest.raises(subprocess.CalledProcessError) as error: with pytest.raises(subprocess.CalledProcessError) as error:
module.execute_and_log_output( module.execute_and_log_output(
['grep'], output_log_level=logging.INFO, shell=False, environment=None ['grep'],
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
) )
assert error.value.returncode == 2 assert error.value.returncode == 2
assert error.value.output assert error.value.output
def test_execute_and_log_output_with_non_borg_error_raises(): def test_execute_and_log_output_truncates_long_error_output():
flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('borg_command').and_return(False)
with pytest.raises(subprocess.CalledProcessError) as error:
module.execute_and_log_output(
['false'], output_log_level=logging.INFO, shell=False, environment=None
)
assert error.value.returncode == 1
def test_execute_and_log_output_truncates_long_borg_error_output():
flexmock(module).ERROR_OUTPUT_MAX_LINE_COUNT = 0 flexmock(module).ERROR_OUTPUT_MAX_LINE_COUNT = 0
flexmock(module.logger).should_receive('log') flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('borg_command').and_return(False) flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
with pytest.raises(subprocess.CalledProcessError) as error: with pytest.raises(subprocess.CalledProcessError) as error:
module.execute_and_log_output( module.execute_and_log_output(
['grep'], output_log_level=logging.INFO, shell=False, environment=None ['grep'],
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
) )
assert error.value.returncode == 2 assert error.value.returncode == 2
@ -78,18 +69,13 @@ def test_execute_and_log_output_truncates_long_borg_error_output():
def test_execute_and_log_output_with_no_output_logs_nothing(): def test_execute_and_log_output_with_no_output_logs_nothing():
flexmock(module.logger).should_receive('log').never() flexmock(module.logger).should_receive('log').never()
flexmock(module).should_receive('borg_command').and_return(False) flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
module.execute_and_log_output( module.execute_and_log_output(
['true'], output_log_level=logging.INFO, shell=False, environment=None ['true'],
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
) )
def test_execute_and_log_output_with_error_exit_status_raises():
flexmock(module.logger).should_receive('log')
flexmock(module).should_receive('borg_command').and_return(False)
with pytest.raises(subprocess.CalledProcessError):
module.execute_and_log_output(
['grep'], output_log_level=logging.INFO, shell=False, environment=None
)

View file

@ -7,8 +7,10 @@ from borgmatic.borg import extract as module
from ..test_verbosity import insert_logging_mock from ..test_verbosity import insert_logging_mock
def insert_execute_command_mock(command): def insert_execute_command_mock(command, working_directory=None, error_on_warnings=True):
flexmock(module).should_receive('execute_command').with_args(command).once() flexmock(module).should_receive('execute_command').with_args(
command, working_directory=working_directory, error_on_warnings=error_on_warnings
).once()
def insert_execute_command_output_mock(command, result): def insert_execute_command_output_mock(command, result):
@ -86,26 +88,28 @@ def test_extract_last_archive_dry_run_calls_borg_with_lock_wait_parameters():
def test_extract_archive_calls_borg_with_restore_path_parameters(): def test_extract_archive_calls_borg_with_restore_path_parameters():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', 'repo::archive', 'path1', 'path2')) insert_execute_command_mock(('borg', 'extract', 'repo::archive', 'path1', 'path2'))
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=['path1', 'path2'], paths=['path1', 'path2'],
location_config={}, location_config={},
storage_config={}, storage_config={},
) )
def test_extract_archive_calls_borg_with_remote_path_parameters(): def test_extract_archive_calls_borg_with_remote_path_parameters():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--remote-path', 'borg1', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--remote-path', 'borg1', 'repo::archive'))
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={}, storage_config={},
remote_path='borg1', remote_path='borg1',
@ -113,45 +117,49 @@ def test_extract_archive_calls_borg_with_remote_path_parameters():
def test_extract_archive_calls_borg_with_numeric_owner_parameter(): def test_extract_archive_calls_borg_with_numeric_owner_parameter():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--numeric-owner', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--numeric-owner', 'repo::archive'))
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={'numeric_owner': True}, location_config={'numeric_owner': True},
storage_config={}, storage_config={},
) )
def test_extract_archive_calls_borg_with_umask_parameters(): def test_extract_archive_calls_borg_with_umask_parameters():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--umask', '0770', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--umask', '0770', 'repo::archive'))
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={'umask': '0770'}, storage_config={'umask': '0770'},
) )
def test_extract_archive_calls_borg_with_lock_wait_parameters(): def test_extract_archive_calls_borg_with_lock_wait_parameters():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--lock-wait', '5', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--lock-wait', '5', 'repo::archive'))
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={'lock_wait': '5'}, storage_config={'lock_wait': '5'},
) )
def test_extract_archive_with_log_info_calls_borg_with_info_parameter(): def test_extract_archive_with_log_info_calls_borg_with_info_parameter():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--info', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--info', 'repo::archive'))
insert_logging_mock(logging.INFO) insert_logging_mock(logging.INFO)
@ -159,13 +167,14 @@ def test_extract_archive_with_log_info_calls_borg_with_info_parameter():
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={}, storage_config={},
) )
def test_extract_archive_with_log_debug_calls_borg_with_debug_parameters(): def test_extract_archive_with_log_debug_calls_borg_with_debug_parameters():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock( insert_execute_command_mock(
('borg', 'extract', '--debug', '--list', '--show-rc', 'repo::archive') ('borg', 'extract', '--debug', '--list', '--show-rc', 'repo::archive')
) )
@ -175,35 +184,54 @@ def test_extract_archive_with_log_debug_calls_borg_with_debug_parameters():
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={}, storage_config={},
) )
def test_extract_archive_calls_borg_with_dry_run_parameter(): def test_extract_archive_calls_borg_with_dry_run_parameter():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', '--dry-run', 'repo::archive')) insert_execute_command_mock(('borg', 'extract', '--dry-run', 'repo::archive'))
module.extract_archive( module.extract_archive(
dry_run=True, dry_run=True,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={}, storage_config={},
) )
def test_extract_archive_calls_borg_with_destination_path():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
insert_execute_command_mock(('borg', 'extract', 'repo::archive'), working_directory='/dest')
module.extract_archive(
dry_run=False,
repository='repo',
archive='archive',
paths=None,
location_config={},
storage_config={},
destination_path='/dest',
)
def test_extract_archive_calls_borg_with_progress_parameter(): def test_extract_archive_calls_borg_with_progress_parameter():
flexmock(module.os.path).should_receive('abspath').and_return('repo')
flexmock(module).should_receive('execute_command_without_capture').with_args( flexmock(module).should_receive('execute_command_without_capture').with_args(
('borg', 'extract', '--progress', 'repo::archive') ('borg', 'extract', '--progress', 'repo::archive'),
working_directory=None,
error_on_warnings=True,
).once() ).once()
module.extract_archive( module.extract_archive(
dry_run=False, dry_run=False,
repository='repo', repository='repo',
archive='archive', archive='archive',
restore_paths=None, paths=None,
location_config={}, location_config={},
storage_config={}, storage_config={},
progress=True, progress=True,

View file

@ -0,0 +1,17 @@
from flexmock import flexmock
from borgmatic.hooks import cronitor as module
def test_ping_cronitor_hits_ping_url():
ping_url = 'https://example.com'
append = 'failed-so-hard'
flexmock(module.requests).should_receive('get').with_args('{}/{}'.format(ping_url, append))
module.ping_cronitor(ping_url, 'config.yaml', dry_run=False, append=append)
def test_ping_cronitor_without_ping_url_does_not_raise():
flexmock(module.requests).should_receive('get').never()
module.ping_cronitor(ping_url=None, config_filename='config.yaml', dry_run=False, append='oops')

View file

@ -4,9 +4,30 @@ from flexmock import flexmock
from borgmatic.hooks import postgresql as module from borgmatic.hooks import postgresql as module
def test_make_database_dump_filename_uses_name_and_hostname():
flexmock(module.os.path).should_receive('expanduser').and_return('databases')
assert module.make_database_dump_filename('test', 'hostname') == 'databases/hostname/test'
def test_make_database_dump_filename_without_hostname_defaults_to_localhost():
flexmock(module.os.path).should_receive('expanduser').and_return('databases')
assert module.make_database_dump_filename('test') == 'databases/localhost/test'
def test_make_database_dump_filename_with_invalid_name_raises():
flexmock(module.os.path).should_receive('expanduser').and_return('databases')
with pytest.raises(ValueError):
module.make_database_dump_filename('invalid/name')
def test_dump_databases_runs_pg_dump_for_each_database(): def test_dump_databases_runs_pg_dump_for_each_database():
databases = [{'name': 'foo'}, {'name': 'bar'}] databases = [{'name': 'foo'}, {'name': 'bar'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
for name in ('foo', 'bar'): for name in ('foo', 'bar'):
@ -29,7 +50,9 @@ def test_dump_databases_runs_pg_dump_for_each_database():
def test_dump_databases_with_dry_run_skips_pg_dump(): def test_dump_databases_with_dry_run_skips_pg_dump():
databases = [{'name': 'foo'}, {'name': 'bar'}] databases = [{'name': 'foo'}, {'name': 'bar'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').never() flexmock(module).should_receive('execute_command').never()
@ -40,16 +63,11 @@ def test_dump_databases_without_databases_does_not_raise():
module.dump_databases([], 'test.yaml', dry_run=False) module.dump_databases([], 'test.yaml', dry_run=False)
def test_dump_databases_with_invalid_database_name_raises():
databases = [{'name': 'heehee/../../etc/passwd'}]
with pytest.raises(ValueError):
module.dump_databases(databases, 'test.yaml', dry_run=True)
def test_dump_databases_runs_pg_dump_with_hostname_and_port(): def test_dump_databases_runs_pg_dump_with_hostname_and_port():
databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}] databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/database.example.org/foo'
)
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
@ -75,7 +93,9 @@ def test_dump_databases_runs_pg_dump_with_hostname_and_port():
def test_dump_databases_runs_pg_dump_with_username_and_password(): def test_dump_databases_runs_pg_dump_with_username_and_password():
databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}] databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
)
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
@ -99,7 +119,9 @@ def test_dump_databases_runs_pg_dump_with_username_and_password():
def test_dump_databases_runs_pg_dump_with_format(): def test_dump_databases_runs_pg_dump_with_format():
databases = [{'name': 'foo', 'format': 'tar'}] databases = [{'name': 'foo', 'format': 'tar'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
)
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
@ -121,7 +143,9 @@ def test_dump_databases_runs_pg_dump_with_format():
def test_dump_databases_runs_pg_dump_with_options(): def test_dump_databases_runs_pg_dump_with_options():
databases = [{'name': 'foo', 'options': '--stuff=such'}] databases = [{'name': 'foo', 'options': '--stuff=such'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
)
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
@ -144,7 +168,9 @@ def test_dump_databases_runs_pg_dump_with_options():
def test_dump_databases_runs_pg_dumpall_for_all_databases(): def test_dump_databases_runs_pg_dumpall_for_all_databases():
databases = [{'name': 'all'}] databases = [{'name': 'all'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/all'
)
flexmock(module.os).should_receive('makedirs') flexmock(module.os).should_receive('makedirs')
flexmock(module).should_receive('execute_command').with_args( flexmock(module).should_receive('execute_command').with_args(
@ -157,7 +183,9 @@ def test_dump_databases_runs_pg_dumpall_for_all_databases():
def test_remove_database_dumps_removes_dump_for_each_database(): def test_remove_database_dumps_removes_dump_for_each_database():
databases = [{'name': 'foo'}, {'name': 'bar'}] databases = [{'name': 'foo'}, {'name': 'bar'}]
flexmock(module.os.path).should_receive('expanduser').and_return('databases') flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
flexmock(module.os).should_receive('listdir').and_return([]) flexmock(module.os).should_receive('listdir').and_return([])
flexmock(module.os).should_receive('rmdir') flexmock(module.os).should_receive('rmdir')
@ -180,8 +208,181 @@ def test_remove_database_dumps_without_databases_does_not_raise():
module.remove_database_dumps([], 'test.yaml', dry_run=False) module.remove_database_dumps([], 'test.yaml', dry_run=False)
def test_remove_database_dumps_with_invalid_database_name_raises(): def test_make_database_dump_patterns_converts_names_to_glob_paths():
databases = [{'name': 'heehee/../../etc/passwd'}] flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/*/foo'
).and_return('databases/*/bar')
assert module.make_database_dump_patterns(('foo', 'bar')) == [
'databases/*/foo',
'databases/*/bar',
]
def test_make_database_dump_patterns_treats_empty_names_as_matching_all_databases():
flexmock(module).should_receive('make_database_dump_filename').with_args('*', '*').and_return(
'databases/*/*'
)
assert module.make_database_dump_patterns(()) == ['databases/*/*']
def test_convert_glob_patterns_to_borg_patterns_removes_leading_slash():
assert module.convert_glob_patterns_to_borg_patterns(('/etc/foo/bar',)) == ['sh:etc/foo/bar']
def test_get_database_names_from_dumps_gets_names_from_filenames_matching_globs():
flexmock(module.glob).should_receive('glob').and_return(
('databases/localhost/foo',)
).and_return(('databases/localhost/bar',)).and_return(())
assert module.get_database_names_from_dumps(
('databases/*/foo', 'databases/*/bar', 'databases/*/baz')
) == ['foo', 'bar']
def test_get_database_configurations_only_produces_named_databases():
databases = [
{'name': 'foo', 'hostname': 'example.org'},
{'name': 'bar', 'hostname': 'example.com'},
{'name': 'baz', 'hostname': 'example.org'},
]
assert list(module.get_database_configurations(databases, ('foo', 'baz'))) == [
{'name': 'foo', 'hostname': 'example.org'},
{'name': 'baz', 'hostname': 'example.org'},
]
def test_get_database_configurations_matches_all_database():
databases = [
{'name': 'foo', 'hostname': 'example.org'},
{'name': 'all', 'hostname': 'example.com'},
]
assert list(module.get_database_configurations(databases, ('foo', 'bar', 'baz'))) == [
{'name': 'foo', 'hostname': 'example.org'},
{'name': 'bar', 'hostname': 'example.com'},
{'name': 'baz', 'hostname': 'example.com'},
]
def test_get_database_configurations_with_unknown_database_name_raises():
databases = [{'name': 'foo', 'hostname': 'example.org'}]
with pytest.raises(ValueError): with pytest.raises(ValueError):
module.remove_database_dumps(databases, 'test.yaml', dry_run=True) list(module.get_database_configurations(databases, ('foo', 'bar')))
def test_restore_database_dumps_restores_each_database():
databases = [{'name': 'foo'}, {'name': 'bar'}]
flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
for name in ('foo', 'bar'):
flexmock(module).should_receive('execute_command').with_args(
(
'pg_restore',
'--no-password',
'--clean',
'--if-exists',
'--exit-on-error',
'--dbname',
name,
'databases/localhost/{}'.format(name),
),
extra_environment=None,
).once()
flexmock(module).should_receive('execute_command').with_args(
('psql', '--no-password', '--quiet', '--dbname', name, '--command', 'ANALYZE'),
extra_environment=None,
).once()
module.restore_database_dumps(databases, 'test.yaml', dry_run=False)
def test_restore_database_dumps_without_databases_does_not_raise():
module.restore_database_dumps({}, 'test.yaml', dry_run=False)
def test_restore_database_dumps_runs_pg_restore_with_hostname_and_port():
databases = [{'name': 'foo', 'hostname': 'database.example.org', 'port': 5433}]
flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
flexmock(module).should_receive('execute_command').with_args(
(
'pg_restore',
'--no-password',
'--clean',
'--if-exists',
'--exit-on-error',
'--host',
'database.example.org',
'--port',
'5433',
'--dbname',
'foo',
'databases/localhost/foo',
),
extra_environment=None,
).once()
flexmock(module).should_receive('execute_command').with_args(
(
'psql',
'--no-password',
'--quiet',
'--host',
'database.example.org',
'--port',
'5433',
'--dbname',
'foo',
'--command',
'ANALYZE',
),
extra_environment=None,
).once()
module.restore_database_dumps(databases, 'test.yaml', dry_run=False)
def test_restore_database_dumps_runs_pg_restore_with_username_and_password():
databases = [{'name': 'foo', 'username': 'postgres', 'password': 'trustsome1'}]
flexmock(module).should_receive('make_database_dump_filename').and_return(
'databases/localhost/foo'
).and_return('databases/localhost/bar')
flexmock(module).should_receive('execute_command').with_args(
(
'pg_restore',
'--no-password',
'--clean',
'--if-exists',
'--exit-on-error',
'--username',
'postgres',
'--dbname',
'foo',
'databases/localhost/foo',
),
extra_environment={'PGPASSWORD': 'trustsome1'},
).once()
flexmock(module).should_receive('execute_command').with_args(
(
'psql',
'--no-password',
'--quiet',
'--username',
'postgres',
'--dbname',
'foo',
'--command',
'ANALYZE',
),
extra_environment={'PGPASSWORD': 'trustsome1'},
).once()
module.restore_database_dumps(databases, 'test.yaml', dry_run=False)

View file

@ -6,11 +6,54 @@ from flexmock import flexmock
from borgmatic import execute as module from borgmatic import execute as module
def test_exit_code_indicates_error_with_borg_error_is_true():
assert module.exit_code_indicates_error(('/usr/bin/borg1', 'init'), 2)
def test_exit_code_indicates_error_with_borg_warning_is_false():
assert not module.exit_code_indicates_error(('/usr/bin/borg1', 'init'), 1)
def test_exit_code_indicates_error_with_borg_success_is_false():
assert not module.exit_code_indicates_error(('/usr/bin/borg1', 'init'), 0)
def test_exit_code_indicates_error_with_borg_error_and_error_on_warnings_is_true():
assert module.exit_code_indicates_error(('/usr/bin/borg1', 'init'), 2, error_on_warnings=True)
def test_exit_code_indicates_error_with_borg_warning_and_error_on_warnings_is_true():
assert module.exit_code_indicates_error(('/usr/bin/borg1', 'init'), 1, error_on_warnings=True)
def test_exit_code_indicates_error_with_borg_success_and_error_on_warnings_is_false():
assert not module.exit_code_indicates_error(
('/usr/bin/borg1', 'init'), 0, error_on_warnings=True
)
def test_exit_code_indicates_error_with_non_borg_error_is_true():
assert module.exit_code_indicates_error(('/usr/bin/command',), 2)
def test_exit_code_indicates_error_with_non_borg_warning_is_true():
assert module.exit_code_indicates_error(('/usr/bin/command',), 1)
def test_exit_code_indicates_error_with_non_borg_success_is_false():
assert not module.exit_code_indicates_error(('/usr/bin/command',), 0)
def test_execute_command_calls_full_command(): def test_execute_command_calls_full_command():
full_command = ['foo', 'bar'] full_command = ['foo', 'bar']
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module).should_receive('execute_and_log_output').with_args( flexmock(module).should_receive('execute_and_log_output').with_args(
full_command, output_log_level=logging.INFO, shell=False, environment=None full_command,
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=False,
).once() ).once()
output = module.execute_command(full_command) output = module.execute_command(full_command)
@ -22,7 +65,12 @@ def test_execute_command_calls_full_command_with_shell():
full_command = ['foo', 'bar'] full_command = ['foo', 'bar']
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module).should_receive('execute_and_log_output').with_args( flexmock(module).should_receive('execute_and_log_output').with_args(
full_command, output_log_level=logging.INFO, shell=True, environment=None full_command,
output_log_level=logging.INFO,
shell=True,
environment=None,
working_directory=None,
error_on_warnings=False,
).once() ).once()
output = module.execute_command(full_command, shell=True) output = module.execute_command(full_command, shell=True)
@ -34,7 +82,12 @@ def test_execute_command_calls_full_command_with_extra_environment():
full_command = ['foo', 'bar'] full_command = ['foo', 'bar']
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module).should_receive('execute_and_log_output').with_args( flexmock(module).should_receive('execute_and_log_output').with_args(
full_command, output_log_level=logging.INFO, shell=False, environment={'a': 'b', 'c': 'd'} full_command,
output_log_level=logging.INFO,
shell=False,
environment={'a': 'b', 'c': 'd'},
working_directory=None,
error_on_warnings=False,
).once() ).once()
output = module.execute_command(full_command, extra_environment={'c': 'd'}) output = module.execute_command(full_command, extra_environment={'c': 'd'})
@ -42,12 +95,46 @@ def test_execute_command_calls_full_command_with_extra_environment():
assert output is None assert output is None
def test_execute_command_calls_full_command_with_working_directory():
full_command = ['foo', 'bar']
flexmock(module.os, environ={'a': 'b'})
flexmock(module).should_receive('execute_and_log_output').with_args(
full_command,
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory='/working',
error_on_warnings=False,
).once()
output = module.execute_command(full_command, working_directory='/working')
assert output is None
def test_execute_command_calls_full_command_with_error_on_warnings():
full_command = ['foo', 'bar']
flexmock(module.os, environ={'a': 'b'})
flexmock(module).should_receive('execute_and_log_output').with_args(
full_command,
output_log_level=logging.INFO,
shell=False,
environment=None,
working_directory=None,
error_on_warnings=True,
).once()
output = module.execute_command(full_command, error_on_warnings=True)
assert output is None
def test_execute_command_captures_output(): def test_execute_command_captures_output():
full_command = ['foo', 'bar'] full_command = ['foo', 'bar']
expected_output = '[]' expected_output = '[]'
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args( flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, shell=False, env=None full_command, shell=False, env=None, cwd=None
).and_return(flexmock(decode=lambda: expected_output)).once() ).and_return(flexmock(decode=lambda: expected_output)).once()
output = module.execute_command(full_command, output_log_level=None) output = module.execute_command(full_command, output_log_level=None)
@ -60,7 +147,7 @@ def test_execute_command_captures_output_with_shell():
expected_output = '[]' expected_output = '[]'
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args( flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, shell=True, env=None full_command, shell=True, env=None, cwd=None
).and_return(flexmock(decode=lambda: expected_output)).once() ).and_return(flexmock(decode=lambda: expected_output)).once()
output = module.execute_command(full_command, output_log_level=None, shell=True) output = module.execute_command(full_command, output_log_level=None, shell=True)
@ -73,7 +160,7 @@ def test_execute_command_captures_output_with_extra_environment():
expected_output = '[]' expected_output = '[]'
flexmock(module.os, environ={'a': 'b'}) flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args( flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, shell=False, env={'a': 'b', 'c': 'd'} full_command, shell=False, env={'a': 'b', 'c': 'd'}, cwd=None
).and_return(flexmock(decode=lambda: expected_output)).once() ).and_return(flexmock(decode=lambda: expected_output)).once()
output = module.execute_command( output = module.execute_command(
@ -83,6 +170,21 @@ def test_execute_command_captures_output_with_extra_environment():
assert output == expected_output assert output == expected_output
def test_execute_command_captures_output_with_working_directory():
full_command = ['foo', 'bar']
expected_output = '[]'
flexmock(module.os, environ={'a': 'b'})
flexmock(module.subprocess).should_receive('check_output').with_args(
full_command, shell=False, env=None, cwd='/working'
).and_return(flexmock(decode=lambda: expected_output)).once()
output = module.execute_command(
full_command, output_log_level=None, shell=False, working_directory='/working'
)
assert output == expected_output
def test_execute_command_without_capture_does_not_raise_on_success(): def test_execute_command_without_capture_does_not_raise_on_success():
flexmock(module.subprocess).should_receive('check_call').and_raise( flexmock(module.subprocess).should_receive('check_call').and_raise(
module.subprocess.CalledProcessError(0, 'borg init') module.subprocess.CalledProcessError(0, 'borg init')
@ -92,6 +194,7 @@ def test_execute_command_without_capture_does_not_raise_on_success():
def test_execute_command_without_capture_does_not_raise_on_warning(): def test_execute_command_without_capture_does_not_raise_on_warning():
flexmock(module).should_receive('exit_code_indicates_error').and_return(False)
flexmock(module.subprocess).should_receive('check_call').and_raise( flexmock(module.subprocess).should_receive('check_call').and_raise(
module.subprocess.CalledProcessError(1, 'borg init') module.subprocess.CalledProcessError(1, 'borg init')
) )
@ -100,6 +203,7 @@ def test_execute_command_without_capture_does_not_raise_on_warning():
def test_execute_command_without_capture_raises_on_error(): def test_execute_command_without_capture_raises_on_error():
flexmock(module).should_receive('exit_code_indicates_error').and_return(True)
flexmock(module.subprocess).should_receive('check_call').and_raise( flexmock(module.subprocess).should_receive('check_call').and_raise(
module.subprocess.CalledProcessError(2, 'borg init') module.subprocess.CalledProcessError(2, 'borg init')
) )

View file

@ -10,8 +10,6 @@ deps = -rtest_requirements.txt
whitelist_externals = whitelist_externals =
find find
sh sh
install_command =
sh scripts/pip {opts} {packages}
commands_pre = commands_pre =
find {toxinidir} -type f -not -path '{toxinidir}/.tox/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete find {toxinidir} -type f -not -path '{toxinidir}/.tox/*' -path '*/__pycache__/*' -name '*.py[c|o]' -delete
commands = commands =