Merge pull request #23 from chaos-jetzt/touchups
Various smaller changes and cleanups that, to me, wouldn't warrant a PR on their own. Besides addressing some TODOs (notably the one in flake.nix), the goals were to reduce redundant and ambiguous code (e.g. the isDev detection) and to cut visual complexity, making the code easier to follow, understand, and review.
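The biggest single cleanup is the isDev detection: several service modules each re-derived it from the `baseDomain` string, while the new `modules/deployment.nix` (full diff below) computes it once from a typed option and hands it to every module via `_module.args`. As a before/after sketch assembled from the hunks in this diff:

```nix
# Before: repeated in the vaultwarden and website modules, with baseDomain
# set per host via _module.args.baseDomain:
isDev = (builtins.substring 0 3 baseDomain) == "dev";

# After: declared once per host ...
cj.deployment.environment = "dev";  # or "prod"

# ... and derived once in modules/deployment.nix:
isDev = config.cj.deployment.environment == "dev";
baseDomain = "${optionalString isDev "dev."}chaos.jetzt";
```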
Commit 1f9d8ba77f · 12 changed files with 107 additions and 68 deletions
@@ -6,8 +6,6 @@ NixOS configuration for the [chaos.jetzt] project. They are very much work in pr
 - [mumble-web](https://github.com/johni0702/mumble-web), possibly adding [mumble-web-proxy](https://github.com/johni0702/mumble-web-proxy/) on top
   - Both need to be packaged for Nix
-- [Dokuwiki](https://www.dokuwiki.org/dokuwiki)
-  - Migrate away from SSO
 - [Matrix synapse](https://github.com/matrix-org/synapse) + [element-web](https://github.com/vector-im/element-web)
   - Data migration (synapse)
   - Migrate away from SSO (synapse)
@@ -30,14 +28,13 @@ colmena build
 # Build specific host(s)
 colmena build --on host-a,host-b
 
-# Deploy all hosts in test mode (activate config but do not add it to the bootloader menu)
-colmena apply test
+# Deploy all dev hosts in test mode (activate config but do not add it to the bootloader menu)
+colmena apply --on @dev test
 
 # Deploy specific host (activate config and use it at the next boot (switch goal))
 colmena apply --on host-a
 
 # A VM of the host can be built using plain nix build
 
 nix build .\#nixosConfigurations.host-a.config.system.build.vmWithBootLoader
 ```
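Since colmena tags are now derived from the same option (see the flake.nix hunk further down, `tags = [ config.cj.deployment.environment ];`), production hosts can be targeted analogously with `colmena apply --on @prod`; that invocation is implied by the tag setup rather than spelled out in the README.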
@@ -1,6 +1,7 @@
 { config, lib, pkgs, inputs, ... }: {
   imports = [
     ./users.nix
+    ../modules/deployment.nix
     # Monitoring is applicable to all hosts, thus placing it here
     ../services/monitoring
   ];
@@ -55,11 +55,10 @@
       meta.nixpkgs = import nixpkgs {
         system = "x86_64-linux";
       };
-      defaults = { name, ... }: {
+      defaults = { name, config, ... }: {
         deployment = {
-          tags = if name == "shirley" then [ "prod" ] else [ "dev" ];
-          # TODO: It'd probably be nice to derive that from the host-configured fqdn
-          targetHost = "${name}.net.chaos.jetzt";
+          tags = [ config.cj.deployment.environment ];
+          targetHost = config.networking.fqdn;
           targetUser = null;
         };
       };
@@ -1,5 +1,5 @@
 { lib, pkgs, baseDomain, ... }: {
-  _module.args.baseDomain = "dev.chaos.jetzt";
+  cj.deployment.environment = "dev";
 
   imports = [
     ./hardware-config.nix
@@ -12,16 +12,20 @@
   system.stateVersion = "23.05";
   networking.hostName = "goldberg";
-  # Fallback / for the monitoring v(x)lan
-  networking.useDHCP = true;
 
-  # We need to configure IPv6 statically, and if we start with that we can just also do it for IPv4
-  networking.interfaces.ens3.useDHCP = false;
-  networking.interfaces.ens3.ipv4.addresses = [ { address = "5.75.181.252"; prefixLength = 32; } ];
-  networking.interfaces.ens3.ipv6.addresses = [ { address = "2a01:4f8:1c1e:9e75::1"; prefixLength = 64; } ];
-  networking.defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
-  networking.defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
-  networking.nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
+  networking = {
+    # Fallback / for the monitoring v(x)lan
+    useDHCP = true;
+    defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
+    defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
+    nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
+
+    interfaces.ens3 = {
+      useDHCP = false;
+      ipv4.addresses = [ { address = "5.75.181.252"; prefixLength = 32; } ];
+      ipv6.addresses = [ { address = "2a01:4f8:1c1e:9e75::1"; prefixLength = 64; } ];
+    };
+  };
 
   services.murmur = {
     registerPassword = lib.mkForce "";
@@ -22,7 +22,6 @@
 
   # Use the GRUB 2 boot loader.
   boot.loader.grub.enable = true;
-  boot.loader.grub.version = 2;
   # boot.loader.grub.efiSupport = true;
   # boot.loader.grub.efiInstallAsRemovable = true;
   # boot.loader.efi.efiSysMountPoint = "/boot/efi";
@@ -1,5 +1,5 @@
 { pkgs, baseDomain, ... }: {
-  _module.args.baseDomain = "chaos.jetzt";
+  cj.deployment.environment = "prod";
 
   imports = [
     ./hardware-config.nix
@@ -12,14 +12,18 @@
   system.stateVersion = "23.05";
   networking.hostName = "shirley";
-  # Fallback / for the monitoring v(x)lan
-  networking.useDHCP = true;
 
-  # We need to configure IPv6 statically, and if we start with that we can just also do it for IPv4
-  networking.interfaces.ens3.useDHCP = false;
-  networking.interfaces.ens3.ipv4.addresses = [ { address = "94.130.107.245"; prefixLength = 32; } ];
-  networking.interfaces.ens3.ipv6.addresses = [ { address = "2a01:4f8:c0c:83eb::1"; prefixLength = 64; } ];
-  networking.defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
-  networking.defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
-  networking.nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
+  networking = {
+    # Fallback / for the monitoring v(x)lan
+    useDHCP = true;
+    defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
+    defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
+    nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
+
+    interfaces.ens3 = {
+      useDHCP = false;
+      ipv4.addresses = [ { address = "94.130.107.245"; prefixLength = 32; } ];
+      ipv6.addresses = [ { address = "2a01:4f8:c0c:83eb::1"; prefixLength = 64; } ];
+    };
+  };
 }
@@ -22,7 +22,6 @@
 
   # Use the GRUB 2 boot loader.
   boot.loader.grub.enable = true;
-  boot.loader.grub.version = 2;
   # boot.loader.grub.efiSupport = true;
   # boot.loader.grub.efiInstallAsRemovable = true;
   # boot.loader.efi.efiSysMountPoint = "/boot/efi";
modules/deployment.nix (new file, 26 lines)
@@ -0,0 +1,26 @@
+{ config
+, options
+, lib
+, ... }:
+
+let
+  inherit (lib) mkOption types optionalString;
+
+  cfg = config.cj.deployment;
+  isDev = cfg.environment == "dev";
+in
+{
+  options.cj.deployment = {
+    environment = mkOption {
+      description = "Environment this host will be used for. Affects both colmena deploy groups and the baseDomain";
+      type = types.enum [ "dev" "prod" ];
+    };
+  };
+
+  config = {
+    _module.args = {
+      inherit isDev;
+      baseDomain = "${optionalString isDev "dev."}chaos.jetzt";
+    };
+  };
+}
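For the two hosts in this PR, the option resolves as follows (values taken from the host hunks above; the annotations are a sketch, not code from the diff):

```nix
# goldberg: cj.deployment.environment = "dev";
#   isDev      = true
#   baseDomain = "dev.chaos.jetzt"   # optionalString true "dev." == "dev."
#   colmena tag "dev", deployable via --on @dev
#
# shirley: cj.deployment.environment = "prod";
#   isDev      = false
#   baseDomain = "chaos.jetzt"       # optionalString false "dev." == ""
#   colmena tag "prod", deployable via --on @prod
```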
@@ -1,10 +1,12 @@
-{
-  pkgs,
-  config,
-  lib,
-  baseDomain,
-  ...
-}: let
+{ pkgs
+, config
+, lib
+, baseDomain
+, isDev
+, ...
+}:
+
+let
   fpm_pool = "dokuwiki-${dw_domain}";
   fpm_cfg = config.services.phpfpm.pools.${fpm_pool};
   dw_domain = "wiki.${baseDomain}";
@@ -143,7 +145,7 @@ in {
     };
     plugin.oauthkeycloak = {
       key = get_secret "dokuwiki/keycloak_key";
-      openidurl = "https://sso.chaos.jetzt/auth/realms/chaos-jetzt/.well-known/openid-configuration";
+      openidurl = "https://sso.chaos.jetzt/auth/realms/${if isDev then "dev" else "chaos-jetzt"}/.well-known/openid-configuration";
     };
   };
@@ -4,7 +4,7 @@
   outputs,
   ...
 }: let
-  inherit (lib) concatStringsSep mapAttrsToList hasAttrByPath getAttrFromPath filterAttrs substring singleton optionalString optional;
+  inherit (lib) concatStringsSep mapAttrsToList getAttrFromPath filterAttrs singleton optional;
   inherit (lib) escapeRegex;
   inherit (config.networking) fqdn hostName;
 
@@ -12,11 +12,15 @@
   # but on which we'd like to have included in the monitoring.
   externalTargets = let
     host = hostName: {
-      _module.args.baseDomain = "chaos.jetzt";
+      _module.args = {
+        isDev = false;
+        baseDomain = "chaos.jetzt";
+      };
       config = {
-        networking = {
+        networking = rec {
           inherit hostName;
           domain = "net.chaos.jetzt";
+          fqdn = "${hostName}.${domain}";
         };
         services.prometheus = {
           enable = true;
@@ -35,34 +39,33 @@
 
   monDomain = "mon.${config.networking.domain}";
 
+  # deadnix: skip # Will be used as soon as we have two non-dev hosts
   isMe = host: host.config.networking.fqdn == fqdn;
-  others = filterAttrs (_: !isMe) outputs.nixosConfigurations;
-  isDev = host: (substring 0 3 host._module.args.baseDomain) == "dev";
+  # deadnix: skip # Will be used as soon as we have two non-dev hosts
+  isDev_ = getAttrFromPath [ "_module" "args" "isDev" ];
   allHosts = outputs.nixosConfigurations // externalTargets;
   /*
   Right now we only have one non-dev host in our NixOS setup (the ansible hosts don't monitor the NixOS hosts).
   That's why we currently add all hosts to our little monitoring "cluster". As soon as we have two or more production hosts,
   the dev host can be taken out of the equation
   */
-  # allTargets = filterAttrs (_: c: (isMe c) || !(isDev c)) allHosts;
+  # allTargets = filterAttrs (_: c: (isMe c) || !(isDev_ c)) allHosts;
   allTargets = allHosts;
 
-  # monFqdn = config: "${config.networking.hostName}.${monDomain}";
-  hasEnabled = servicePath: config: let
-    path = servicePath ++ ["enable"];
+  monTarget = service: config: "${config.networking.hostName}.${monDomain}:${toString service.port}";
+  targetAllHosts = servicePath: let
+    service = cfg: getAttrFromPath servicePath cfg.config;
   in
-    (hasAttrByPath path config) && (getAttrFromPath path config);
+    mapAttrsToList
+      (_: c: monTarget (service c) c.config)
+      (filterAttrs (_: c: (service c).enable or false) allTargets);
 
-  monTarget = servicePath: config: let
-    port = toString (getAttrFromPath (servicePath ++ ["port"]) config);
-  in "${config.networking.hostName}.${monDomain}:${port}";
-
-  dropMetrics = {wildcard ? true}: extraRegexen: let
+  dropMetrics = extraRegexen: let
     dropRegexen = [ "go_" "promhttp_metric_handler_requests_" ] ++ extraRegexen;
   in
     singleton {
      inherit (regex);
-      regex = "(${concatStringsSep "|" dropRegexen})${optionalString wildcard ".*"}";
+      regex = "(${concatStringsSep "|" dropRegexen}).*";
      source_labels = ["__name__"];
      action = "drop";
    };
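The helper rework above removes one layer of indirection: `monTarget` now receives the service config directly instead of an attribute path, and the `hasAttrByPath`/`getAttrFromPath` enable check collapses into `(service c).enable or false`. A sketch of how the new `targetAllHosts` evaluates (hostnames are from this repo; the port placeholder stands for whatever each host's service config sets):

```nix
# targetAllHosts ["services" "prometheus" "exporters" "node"]
#   filters allTargets down to hosts with that exporter enabled, then maps
#   each one through monTarget, yielding e.g.:
#     [ "goldberg.mon.net.chaos.jetzt:<port>" "shirley.mon.net.chaos.jetzt:<port>" ]
#   where <port> is toString config.services.prometheus.exporters.node.port.
```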
@@ -75,10 +78,6 @@
 
   prometheusPath = ["services" "prometheus"];
   alertmanagerPath = ["services" "prometheus" "alertmanager"];
-  targetAllHosts = servicePath:
-    mapAttrsToList
-      (_: config: monTarget servicePath config.config)
-      (filterAttrs (_: c: (hasEnabled servicePath c.config)) (outputs.nixosConfigurations // externalTargets));
 in {
   /*
   Steps to edit the monitoring.htpasswd (aka. adding yourself / updating your password):
@@ -155,7 +154,7 @@ in {
 
     alertmanagers = [{
       static_configs = [{
-        targets = [(monTarget alertmanagerPath config)];
+        targets = [(monTarget config.services.prometheus.alertmanager config)];
      }];
    }];
 
@@ -165,11 +164,11 @@ in {
        static_configs = [{
          targets = [
            # Only scraping to own node-exporter
-           (monTarget ["services" "prometheus" "exporters" "node"] config)
+           (monTarget config.services.prometheus.exporters.node config)
          ];
        }];
        relabel_configs = [relabelInstance];
-       metric_relabel_configs = dropMetrics {} [];
+       metric_relabel_configs = dropMetrics [];
      }
      {
        job_name = "alertmanager";
@@ -177,7 +176,7 @@
          targets = targetAllHosts alertmanagerPath;
        }];
        relabel_configs = [relabelInstance];
-       metric_relabel_configs = dropMetrics {} [
+       metric_relabel_configs = dropMetrics [
          "alertmanager_http_(response_size_bytes|request_duration_seconds)_"
          "alertmanager_notification_latency_seconds_"
          "alertmanager_(nflog|cluster)_"
@@ -190,7 +189,7 @@
          targets = targetAllHosts prometheusPath;
        }];
        relabel_configs = [relabelInstance];
-       metric_relabel_configs = dropMetrics {} [
+       metric_relabel_configs = dropMetrics [
          "prometheus_(sd|tsdb|target)_"
          "prometheus_(engine_query|rule_evaluation)_duration_"
          "prometheus_http_(response_size_bytes|request_duration_seconds)_"
@@ -1,9 +1,13 @@
-{ lib, config, pkgs, baseDomain, ... }:
+{ lib
+, config
+, pkgs
+, baseDomain
+, isDev
+, ... }:
 
 let
   vwDbUser = config.users.users.vaultwarden.name;
   vwDbName = config.users.users.vaultwarden.name;
-  isDev = (builtins.substring 0 3 baseDomain) == "dev";
   isDevStr = lib.optionalString isDev;
 in {
   sops.secrets = {
@@ -1,4 +1,10 @@
-{ lib, pkgs, config, baseDomain, ...}:
+{ lib
+, pkgs
+, config
+, baseDomain
+, isDev
+, ...}:
 
 let
   matrixWellKnown = {
     client."m.homeserver".base_url = "https://matrix.${baseDomain}/";

@@ -6,7 +12,6 @@ let
   };
   toJSONFile = name: value: pkgs.writeText name (builtins.toJSON value);
   matrixWellKnownDir = pkgs.linkFarm "matrix-well-known" (builtins.mapAttrs toJSONFile matrixWellKnown);
-  isDev = (builtins.substring 0 3 baseDomain) == "dev";
   webroot = "${config.users.users."web-deploy".home}/public";
   deployPubKey = if isDev then
     "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINRmpgMjXQCjA/YPNJvaNdKMjr0jnLtwKKbLCIisjeBw dev-deploykey@chaos.jetzt"