2022-12-06 20:40:47 +01:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
import datetime
|
|
|
|
import hashlib
|
|
|
|
import json
|
|
|
|
import logging
|
|
|
|
import os
|
|
|
|
import time
|
|
|
|
import re
|
|
|
|
import subprocess
|
|
|
|
import sys
|
2023-02-02 20:30:57 +01:00
|
|
|
import traceback
|
2022-12-06 20:40:47 +01:00
|
|
|
|
2023-03-05 17:03:48 +01:00
|
|
|
from prometheus_client import start_http_server
|
|
|
|
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
|
2022-12-06 20:40:47 +01:00
|
|
|
|
|
|
|
|
|
|
|
class ResticCollector(object):
    """Prometheus collector exposing metrics about a restic repository.

    Expensive restic commands are run by refresh() on a timer; collect()
    only reads the cached ``self.metrics`` dict, so scrapes stay fast.
    """

    def __init__(
        self, repository, password_file, exit_on_error, disable_check, disable_stats
    ):
        """Store configuration and perform the initial metrics refresh.

        :param repository: restic repository URL (``-r`` argument)
        :param password_file: path to the repository password file (``-p``)
        :param exit_on_error: exit the process when a refresh fails
        :param disable_check: skip the (slow) ``restic check`` command
        :param disable_stats: skip per-snapshot ``restic stats`` commands
        """
        self.repository = repository
        self.password_file = password_file
        self.exit_on_error = exit_on_error
        self.disable_check = disable_check
        self.disable_stats = disable_stats
        # todo: the stats cache increases over time -> remove old ids
        # todo: cold start -> the stats cache could be saved in a persistent
        # volume
        # todo: cold start -> the restic cache (/root/.cache/restic) could be
        # saved in a persistent volume
        self.stats_cache = {}
        self.metrics = {}
        self.refresh(exit_on_error)

    def collect(self):
        """Yield metric families built from the cached ``self.metrics``."""
        logging.debug("Incoming request")

        common_label_names = [
            "client_hostname",
            "client_username",
            "snapshot_hash",
            "snapshot_tag",
        ]

        check_success = GaugeMetricFamily(
            "restic_check_success",
            "Result of restic check operation in the repository",
            labels=[],
        )
        snapshots_total = CounterMetricFamily(
            "restic_snapshots_total",
            "Total number of snapshots in the repository",
            labels=[],
        )
        backup_timestamp = GaugeMetricFamily(
            "restic_backup_timestamp",
            "Timestamp of the last backup",
            labels=common_label_names,
        )
        backup_files_total = CounterMetricFamily(
            "restic_backup_files_total",
            "Number of files in the backup",
            labels=common_label_names,
        )
        backup_size_total = CounterMetricFamily(
            "restic_backup_size_total",
            "Total size of backup in bytes",
            labels=common_label_names,
        )
        backup_snapshots_total = CounterMetricFamily(
            "restic_backup_snapshots_total",
            "Total number of snapshots",
            labels=common_label_names,
        )
        # help-text typo fixed: "Ammount" -> "Amount"
        scrape_duration_seconds = GaugeMetricFamily(
            "restic_scrape_duration_seconds",
            "Amount of time each scrape takes",
            labels=[],
        )

        check_success.add_metric([], self.metrics["check_success"])
        snapshots_total.add_metric([], self.metrics["snapshots_total"])

        for client in self.metrics["clients"]:
            common_label_values = [
                client["hostname"],
                client["username"],
                client["snapshot_hash"],
                client["snapshot_tag"],
            ]

            backup_timestamp.add_metric(common_label_values, client["timestamp"])
            backup_files_total.add_metric(common_label_values, client["files_total"])
            backup_size_total.add_metric(common_label_values, client["size_total"])
            backup_snapshots_total.add_metric(
                common_label_values, client["snapshots_total"]
            )

        scrape_duration_seconds.add_metric([], self.metrics["duration"])

        yield check_success
        yield snapshots_total
        yield backup_timestamp
        yield backup_files_total
        yield backup_size_total
        yield backup_snapshots_total
        yield scrape_duration_seconds

    def refresh(self, exit_on_error=False):
        """Re-run the restic commands and replace the cached metrics.

        On failure the previous metrics are kept and the error is logged;
        the process exits only when ``exit_on_error`` is true.
        """
        try:
            self.metrics = self.get_metrics()
        except Exception:
            logging.error(
                "Unable to collect metrics from Restic. %s",
                traceback.format_exc(0).replace("\n", " "),
            )

            # Shutdown exporter for any error
            if exit_on_error:
                sys.exit(1)

    def get_metrics(self):
        """Build the metrics dict by querying restic for snapshots/stats/check."""
        duration = time.time()
        all_snapshots = self.get_snapshots()
        latest_snapshots = self.get_snapshots(True)
        clients = []
        for snap in latest_snapshots:
            # Collect stats for each snap only if enabled
            if self.disable_stats:
                # return zero as "no-stats" value
                stats = {
                    "total_size": -1,
                    "total_file_count": -1,
                }
            else:
                stats = self.get_stats(snap["id"])

            # use first element of tags if tags is present and non-empty
            # (restic may emit "tags": [] — plain `in` check would IndexError)
            if snap.get("tags"):
                tag = snap["tags"][0]
            else:
                tag = ""

            time_parsed = re.sub(r"\.[^+-]+", "", snap["time"])
            if len(time_parsed) > 19:
                # restic 14: '2023-01-12T06:59:33.1576588+01:00' ->
                # '2023-01-12T06:59:33+01:00'
                time_format = "%Y-%m-%dT%H:%M:%S%z"
            else:
                # restic 12: '2023-02-01T14:14:19.30760523Z' ->
                # '2023-02-01T14:14:19'
                time_format = "%Y-%m-%dT%H:%M:%S"
            # datetime.timestamp() honours a parsed %z offset; the previous
            # time.mktime(...timetuple()) dropped tzinfo and re-interpreted
            # the wall time as local. Naive datetimes still use local time.
            timestamp = datetime.datetime.strptime(
                time_parsed, time_format
            ).timestamp()

            snapshots_total = 0
            for snap2 in all_snapshots:
                if snap2["hash"] == snap["hash"]:
                    snapshots_total += 1

            clients.append(
                {
                    "hostname": snap["hostname"],
                    "username": snap["username"],
                    "snapshot_hash": snap["hash"],
                    "snapshot_tag": tag,
                    "timestamp": timestamp,
                    "size_total": stats["total_size"],
                    "files_total": stats["total_file_count"],
                    "snapshots_total": snapshots_total,
                }
            )
        # todo: fix the commented code when the bug is fixed in restic
        # https://github.com/restic/restic/issues/2126
        # stats = self.get_stats()

        if self.disable_check:
            # return 2 as "no-check" value
            check_success = 2
        else:
            check_success = self.get_check()

        metrics = {
            "check_success": check_success,
            "clients": clients,
            "snapshots_total": len(all_snapshots),
            "duration": time.time() - duration
            # 'size_total': stats['total_size'],
            # 'files_total': stats['total_file_count'],
        }

        return metrics

    def get_snapshots(self, only_latest=False):
        """Return the repository snapshots (with a computed 'hash' per snap).

        :param only_latest: pass ``--latest 1`` to restic
        :raises Exception: when the restic command exits non-zero
        """
        cmd = [
            "restic",
            "-r",
            self.repository,
            "-p",
            self.password_file,
            "--no-lock",
            "snapshots",
            "--json",
        ]

        if only_latest:
            cmd.extend(["--latest", "1"])

        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode != 0:
            raise Exception(
                "Error executing restic snapshot command: " + self.parse_stderr(result)
            )
        snapshots = json.loads(result.stdout.decode("utf-8"))
        for snap in snapshots:
            snap["hash"] = self.calc_snapshot_hash(snap)
        return snapshots

    def get_stats(self, snapshot_id=None):
        """Return restic stats (cached per snapshot id).

        This command is expensive in CPU/Memory (1-5 seconds),
        and much more when snapshot_id=None (3 minutes) -> we avoid this call for now
        https://github.com/restic/restic/issues/2126
        """
        if snapshot_id is not None and snapshot_id in self.stats_cache:
            return self.stats_cache[snapshot_id]

        cmd = [
            "restic",
            "-r",
            self.repository,
            "-p",
            self.password_file,
            "--no-lock",
            "stats",
            "--json",
        ]
        if snapshot_id is not None:
            cmd.append(snapshot_id)

        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode != 0:
            raise Exception(
                "Error executing restic stats command: " + self.parse_stderr(result)
            )
        stats = json.loads(result.stdout.decode("utf-8"))

        if snapshot_id is not None:
            self.stats_cache[snapshot_id] = stats

        return stats

    def get_check(self):
        """Run ``restic check``; return 1 on success, 0 on error.

        This command takes 20 seconds or more, but it's required.
        """
        cmd = [
            "restic",
            "-r",
            self.repository,
            "-p",
            self.password_file,
            "--no-lock",
            "check",
        ]

        result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode == 0:
            return 1  # ok
        else:
            logging.warning(
                "Error checking the repository health. " + self.parse_stderr(result)
            )
            return 0  # error

    def calc_snapshot_hash(self, snapshot: dict) -> str:
        """Return a stable id for a client: sha256 of hostname + paths."""
        text = snapshot["hostname"] + ",".join(snapshot["paths"])
        return hashlib.sha256(text.encode("utf-8")).hexdigest()

    def parse_stderr(self, result):
        """Flatten a CompletedProcess' stderr into a one-line message."""
        return (
            result.stderr.decode("utf-8").replace("\n", " ")
            + " Exit code: "
            + str(result.returncode)
        )
|
2023-02-02 20:30:57 +01:00
|
|
|
|
2022-12-06 20:40:47 +01:00
|
|
|
|
|
|
|
if __name__ == "__main__":
    logging.basicConfig(
        format="%(asctime)s %(levelname)-8s %(message)s",
        level=logging.getLevelName(os.environ.get("LOG_LEVEL", "INFO")),
        datefmt="%Y-%m-%d %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging.info("Starting Restic Prometheus Exporter")
    logging.info("It could take a while if the repository is remote")

    # Mandatory configuration: fail fast with a clear message.
    try:
        restic_repo_url = os.environ["RESTIC_REPO_URL"]
    except KeyError:
        logging.error("The environment variable RESTIC_REPO_URL is mandatory")
        sys.exit(1)

    try:
        restic_repo_password_file = os.environ["RESTIC_REPO_PASSWORD_FILE"]
    except KeyError:
        logging.error("The environment variable RESTIC_REPO_PASSWORD_FILE is mandatory")
        sys.exit(1)

    def env_flag(name):
        # bool(os.environ.get(...)) is True for ANY non-empty string,
        # including "false" and "0" — parse the common spellings explicitly.
        return os.environ.get(name, "").strip().lower() in ("1", "true", "yes", "on")

    exporter_address = os.environ.get("LISTEN_ADDRESS", "0.0.0.0")
    exporter_port = int(os.environ.get("LISTEN_PORT", 8001))
    exporter_refresh_interval = int(os.environ.get("REFRESH_INTERVAL", 60))
    exporter_exit_on_error = env_flag("EXIT_ON_ERROR")
    exporter_disable_check = env_flag("NO_CHECK")
    exporter_disable_stats = env_flag("NO_STATS")

    try:
        collector = ResticCollector(
            restic_repo_url,
            restic_repo_password_file,
            exporter_exit_on_error,
            exporter_disable_check,
            exporter_disable_stats,
        )
        REGISTRY.register(collector)
        start_http_server(exporter_port, exporter_address)
        logging.info(
            "Serving at http://{0}:{1}".format(exporter_address, exporter_port)
        )

        # Refresh loop: collect() itself only reads cached metrics.
        while True:
            logging.info(
                "Refreshing stats every {0} seconds".format(exporter_refresh_interval)
            )
            time.sleep(exporter_refresh_interval)
            collector.refresh()

    except KeyboardInterrupt:
        logging.info("\nInterrupted")
        exit(0)
|