diff --git a/README.md b/README.md index 53889ff..82684d8 100644 --- a/README.md +++ b/README.md @@ -62,6 +62,7 @@ borgmatic is powered by [Borg Backup](https://www.borgbackup.org/). MongoDB SQLite Healthchecks +Uptime Kuma Cronitor Cronhub PagerDuty diff --git a/borgmatic/config/schema.yaml b/borgmatic/config/schema.yaml index 9177cc7..7d4edd4 100644 --- a/borgmatic/config/schema.yaml +++ b/borgmatic/config/schema.yaml @@ -1766,6 +1766,38 @@ properties: an account at https://healthchecks.io (or self-host Healthchecks) if you'd like to use this service. See borgmatic monitoring documentation for details. + uptimekuma: + type: object + required: ['push_url'] + additionalProperties: false + properties: + push_url: + type: string + description: | + Uptime Kuma push URL without query string (do not include the + question mark or anything after it). + example: https://example.uptime.kuma/api/push/abcd1234 + states: + type: array + items: + type: string + enum: + - start + - finish + - fail + uniqueItems: true + description: | + List of one or more monitoring states to push for: "start", + "finish", and/or "fail". Defaults to pushing for all + states. + example: + - start + - finish + - fail + description: | + Configuration for a monitoring integration with Uptime Kuma using + the Push monitor type. 
+ See more information here: https://uptime.kuma.pet cronitor: type: object required: ['ping_url'] diff --git a/borgmatic/hooks/dispatch.py b/borgmatic/hooks/dispatch.py index d437c98..d41ebab 100644 --- a/borgmatic/hooks/dispatch.py +++ b/borgmatic/hooks/dispatch.py @@ -13,6 +13,7 @@ from borgmatic.hooks import ( pagerduty, postgresql, sqlite, + uptimekuma, ) logger = logging.getLogger(__name__) @@ -22,6 +23,7 @@ HOOK_NAME_TO_MODULE = { 'cronhub': cronhub, 'cronitor': cronitor, 'healthchecks': healthchecks, + 'loki': loki, 'mariadb_databases': mariadb, 'mongodb_databases': mongodb, 'mysql_databases': mysql, @@ -29,7 +31,7 @@ HOOK_NAME_TO_MODULE = { 'pagerduty': pagerduty, 'postgresql_databases': postgresql, 'sqlite_databases': sqlite, - 'loki': loki, + 'uptimekuma': uptimekuma, } diff --git a/borgmatic/hooks/monitor.py b/borgmatic/hooks/monitor.py index 0cbfef4..abe28c5 100644 --- a/borgmatic/hooks/monitor.py +++ b/borgmatic/hooks/monitor.py @@ -1,6 +1,15 @@ from enum import Enum -MONITOR_HOOK_NAMES = ('apprise', 'healthchecks', 'cronitor', 'cronhub', 'pagerduty', 'ntfy', 'loki') +MONITOR_HOOK_NAMES = ( + 'apprise', + 'healthchecks', + 'cronitor', + 'cronhub', + 'pagerduty', + 'ntfy', + 'loki', + 'uptimekuma', +) class State(Enum): diff --git a/borgmatic/hooks/uptimekuma.py b/borgmatic/hooks/uptimekuma.py new file mode 100644 index 0000000..75731be --- /dev/null +++ b/borgmatic/hooks/uptimekuma.py @@ -0,0 +1,51 @@ +import logging + +import requests + +logger = logging.getLogger(__name__) + + +def initialize_monitor( + push_url, config, config_filename, monitoring_log_level, dry_run +): # pragma: no cover + ''' + No initialization is necessary for this monitor. + ''' + pass + + +def ping_monitor(hook_config, config, config_filename, state, monitoring_log_level, dry_run): + ''' + Make a get request to the configured Uptime Kuma push_url. + Use the given configuration filename in any log entries. + If this is a dry run, then don't actually push anything. 
+ ''' + run_states = hook_config.get('states', ['start', 'finish', 'fail']) + if state.name.lower() not in run_states: + return + dry_run_label = ' (dry run; not actually pushing)' if dry_run else '' + status = 'down' if state.name.lower() == 'fail' else 'up' + push_url = hook_config.get('push_url', 'https://example.uptime.kuma/api/push/abcd1234') + query = f'status={status}&msg={state.name.lower()}' + logger.info( + f'{config_filename}: Pushing Uptime Kuma push_url {push_url}?{query} {dry_run_label}' + ) + logger.debug(f'{config_filename}: Full Uptime Kuma state URL {push_url}?{query}') + if dry_run: + return + logging.getLogger('urllib3').setLevel(logging.ERROR) + try: + response = requests.get(f'{push_url}?{query}') + if not response.ok: + response.raise_for_status() + except requests.exceptions.RequestException as error: + logger.warning(f'{config_filename}: Uptime Kuma error: {error}') + + +def destroy_monitor( + push_url_or_uuid, config, config_filename, monitoring_log_level, dry_run +): # pragma: no cover + ''' + No destruction is necessary for this monitor. + ''' + pass diff --git a/docs/how-to/monitor-your-backups.md b/docs/how-to/monitor-your-backups.md index 8699b7b..3786d93 100644 --- a/docs/how-to/monitor-your-backups.md +++ b/docs/how-to/monitor-your-backups.md @@ -46,6 +46,7 @@ them as backups happen: * [ntfy](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#ntfy-hook) * [Grafana Loki](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#loki-hook) * [Apprise](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#apprise-hook) + * [Uptime Kuma](https://torsion.org/borgmatic/docs/how-to/monitor-your-backups/#uptimekuma-hook) The idea is that you'll receive an alert when something goes wrong or when the service doesn't hear from borgmatic for a configured interval (if supported). @@ -505,6 +506,60 @@ See the [configuration reference](https://torsion.org/borgmatic/docs/reference/configuration/) for details. 
+## Uptime Kuma hook + +[Uptime Kuma](https://uptime.kuma.pet) is an easy-to-use self-hosted +monitoring tool and can provide a Push monitor type to accept +HTTP `GET` requests from a service instead of contacting it +directly. + +Uptime Kuma allows you to see a history of monitor states and +can in turn alert via Ntfy, Gotify, Matrix, Apprise, Email, and many more. + +An example configuration is shown here with all the available options: + +```yaml +uptimekuma: + push_url: https://kuma.my-domain.com/api/push/abcd1234 + states: + - start + - finish + - fail +``` +The `push_url` is provided to you from your Uptime Kuma service and +includes a query string; the text including and after the question mark ('?'). +Please do not include the query string in the `push_url` configuration; +borgmatic will add this automatically depending on the state of your backup. + +Using `start`, `finish` and `fail` states means you will get two 'up beats' in +Uptime Kuma for successful backups and the ability to see on failures if +and when the backup started (was there a `start` beat?). + +A reasonable base-level configuration for an Uptime Kuma Monitor +for a backup is below: + +```ini +# These are to be entered into Uptime Kuma and not into your +# borgmatic configuration. + +Monitor Type = Push +# Push monitors wait for the client to contact Uptime Kuma +# instead of Uptime Kuma contacting the client. +# This is perfect for backup monitoring. + +Heartbeat Interval = 90000 # = 25 hours = 1 day + 1 hour + +# Wait 6 times the Heartbeat Retry (below) before logging a heartbeat missed +Retries = 6 + +# Multiplied by Retries this gives a grace period within which +# the monitor goes into the "Pending" state +Heartbeat Retry = 360 # = 10 minutes + +# For each Heartbeat Interval if the backup fails repeatedly, +# a notification is sent each time. 
+Resend Notification every X times = 1 +``` ## Scripting borgmatic diff --git a/docs/static/uptimekuma.png b/docs/static/uptimekuma.png new file mode 100644 index 0000000..fe7d171 Binary files /dev/null and b/docs/static/uptimekuma.png differ diff --git a/tests/unit/hooks/test_uptimekuma.py b/tests/unit/hooks/test_uptimekuma.py new file mode 100644 index 0000000..05d5802 --- /dev/null +++ b/tests/unit/hooks/test_uptimekuma.py @@ -0,0 +1,165 @@ +from flexmock import flexmock + +import borgmatic.hooks.monitor +from borgmatic.hooks import uptimekuma as module + +DEFAULT_PUSH_URL = 'https://example.uptime.kuma/api/push/abcd1234' +CUSTOM_PUSH_URL = 'https://uptime.example.com/api/push/efgh5678' + + +def test_ping_monitor_hits_default_uptimekuma_on_fail(): + hook_config = {} + flexmock(module.requests).should_receive('get').with_args( + f'{DEFAULT_PUSH_URL}?status=down&msg=fail' + ).and_return(flexmock(ok=True)).once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FAIL, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_hits_custom_uptimekuma_on_fail(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').with_args( + f'{CUSTOM_PUSH_URL}?status=down&msg=fail' + ).and_return(flexmock(ok=True)).once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FAIL, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_custom_uptimekuma_on_start(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').with_args( + f'{CUSTOM_PUSH_URL}?status=up&msg=start' + ).and_return(flexmock(ok=True)).once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.START, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_custom_uptimekuma_on_finish(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + 
flexmock(module.requests).should_receive('get').with_args( + f'{CUSTOM_PUSH_URL}?status=up&msg=finish' + ).and_return(flexmock(ok=True)).once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FINISH, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_does_not_hit_custom_uptimekuma_on_fail_dry_run(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').never() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FAIL, + monitoring_log_level=1, + dry_run=True, + ) + + +def test_ping_monitor_does_not_hit_custom_uptimekuma_on_start_dry_run(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').never() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.START, + monitoring_log_level=1, + dry_run=True, + ) + + +def test_ping_monitor_does_not_hit_custom_uptimekuma_on_finish_dry_run(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').never() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FINISH, + monitoring_log_level=1, + dry_run=True, + ) + + +def test_ping_monitor_with_connection_error_logs_warning(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').with_args( + f'{CUSTOM_PUSH_URL}?status=down&msg=fail' + ).and_raise(module.requests.exceptions.ConnectionError) + flexmock(module.logger).should_receive('warning').once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FAIL, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_with_other_error_logs_warning(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + response = flexmock(ok=False) + response.should_receive('raise_for_status').and_raise( + 
+ module.requests.exceptions.RequestException + ) + flexmock(module.requests).should_receive('get').with_args( + f'{CUSTOM_PUSH_URL}?status=down&msg=fail' + ).and_return(response) + flexmock(module.logger).should_receive('warning').once() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.FAIL, + monitoring_log_level=1, + dry_run=False, + ) + + +def test_ping_monitor_with_invalid_run_state(): + hook_config = {'push_url': CUSTOM_PUSH_URL} + flexmock(module.requests).should_receive('get').never() + + module.ping_monitor( + hook_config, + {}, + 'config.yaml', + borgmatic.hooks.monitor.State.LOG, + monitoring_log_level=1, + dry_run=True, + )