Merge pull request #23 from chaos-jetzt/touchups

Various smaller changes and cleanups that, to me, wouldn't warrant a PR on
their own. Besides addressing some TODOs (namely the one in flake.nix), the
goals were to reduce redundant and ambiguous code (e.g. the isDev detection)
and to cut down on visual complexity, making the code easier to follow,
understand, and review.
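
To make the isDev change concrete, a minimal before/after sketch (the framing is mine; the code mirrors the diffs below):

```nix
# Before: individual modules re-derived isDev from the injected baseDomain
{ baseDomain, ... }:
let
  isDev = (builtins.substring 0 3 baseDomain) == "dev";
in { /* ... */ }

# After: a host declares its environment once (see modules/deployment.nix),
# and isDev and baseDomain are both derived from it and injected as module args
{ isDev, baseDomain, ... }: { /* ... */ }
```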
Moritz 'e1mo' Fromm 2023-08-04 17:06:21 +02:00
commit 1f9d8ba77f
No known key found for this signature in database
GPG key ID: 1D5D79A439E787F1
12 changed files with 107 additions and 68 deletions

View file

@@ -6,8 +6,6 @@ NixOS configuration for the [chaos.jetzt] project. They are very much work in pr
- [mumble-web](https://github.com/johni0702/mumble-web), possibly adding [mumble-web-proxy](https://github.com/johni0702/mumble-web-proxy/) on top
- Both need to be packaged for Nix
- [Dokuwiki](https://www.dokuwiki.org/dokuwiki)
- Migrate away from SSO
- [Matrix synapse](https://github.com/matrix-org/synapse) + [element-web](https://github.com/vector-im/element-web)
- Data migration (synapse)
- Migrate away from SSO (synapse)
@@ -30,14 +28,13 @@ colmena build
# Build specific host(s)
colmena build --on host-a,host-b
# Deploy all hosts in test mode (activate config but do not add it to the bootloader menu)
colmena apply test
# Deploy all dev hosts in test mode (activate config but do not add it to the bootloader menu)
colmena apply --on @dev test
# Deploy specific host (activate config and use it at the next boot (switch goal))
colmena apply --on host-a
# A VM of the host can be built using plain nix build
nix build .\#nixosConfigurations.host-a.config.system.build.vmWithBootLoader
```

View file

@@ -1,6 +1,7 @@
{ config, lib, pkgs, inputs, ... }: {
imports = [
./users.nix
../modules/deployment.nix
# Monitoring is applicable to all hosts, hence it lives here
../services/monitoring
];

View file

@@ -55,11 +55,10 @@
meta.nixpkgs = import nixpkgs {
system = "x86_64-linux";
};
defaults = { name, ... }: {
defaults = { name, config, ... }: {
deployment = {
tags = if name == "shirley" then [ "prod" ] else [ "dev" ];
# TODO: It'd probably be nice to derive that from the host-configured fqdn
targetHost = "${name}.net.chaos.jetzt";
tags = [ config.cj.deployment.environment ];
targetHost = config.networking.fqdn;
targetUser = null;
};
};

View file

@@ -1,5 +1,5 @@
{ lib, pkgs, baseDomain, ... }: {
_module.args.baseDomain = "dev.chaos.jetzt";
cj.deployment.environment = "dev";
imports = [
./hardware-config.nix
@@ -12,16 +12,20 @@
system.stateVersion = "23.05";
networking.hostName = "goldberg";
# Fallback / for the monitoring v(x)lan
networking.useDHCP = true;
# We need to configure IPv6 statically, and since we have to do that anyway, we might as well configure IPv4 statically too
networking.interfaces.ens3.useDHCP = false;
networking.interfaces.ens3.ipv4.addresses = [ { address = "5.75.181.252"; prefixLength = 32; } ];
networking.interfaces.ens3.ipv6.addresses = [ { address = "2a01:4f8:1c1e:9e75::1"; prefixLength = 64; } ];
networking.defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
networking.defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
networking.nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
networking = {
# Fallback / for the monitoring v(x)lan
useDHCP = true;
defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
interfaces.ens3 = {
useDHCP = false;
ipv4.addresses = [ { address = "5.75.181.252"; prefixLength = 32; } ];
ipv6.addresses = [ { address = "2a01:4f8:1c1e:9e75::1"; prefixLength = 64; } ];
};
};
services.murmur = {
registerPassword = lib.mkForce "";

View file

@@ -22,7 +22,6 @@
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
# boot.loader.grub.efiSupport = true;
# boot.loader.grub.efiInstallAsRemovable = true;
# boot.loader.efi.efiSysMountPoint = "/boot/efi";

View file

@@ -1,5 +1,5 @@
{ pkgs, baseDomain, ... }: {
_module.args.baseDomain = "chaos.jetzt";
cj.deployment.environment = "prod";
imports = [
./hardware-config.nix
@@ -12,14 +12,18 @@
system.stateVersion = "23.05";
networking.hostName = "shirley";
# Fallback / for the monitoring v(x)lan
networking.useDHCP = true;
# We need to configure IPv6 statically, and since we have to do that anyway, we might as well configure IPv4 statically too
networking.interfaces.ens3.useDHCP = false;
networking.interfaces.ens3.ipv4.addresses = [ { address = "94.130.107.245"; prefixLength = 32; } ];
networking.interfaces.ens3.ipv6.addresses = [ { address = "2a01:4f8:c0c:83eb::1"; prefixLength = 64; } ];
networking.defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
networking.defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
networking.nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
networking = {
# Fallback / for the monitoring v(x)lan
useDHCP = true;
defaultGateway = { address = "172.31.1.1"; interface = "ens3"; };
defaultGateway6 = { address = "fe80::1"; interface = "ens3"; };
nameservers = [ "213.133.98.98" "213.133.99.99" "213.133.100.100" ];
interfaces.ens3 = {
useDHCP = false;
ipv4.addresses = [ { address = "94.130.107.245"; prefixLength = 32; } ];
ipv6.addresses = [ { address = "2a01:4f8:c0c:83eb::1"; prefixLength = 64; } ];
};
};
}

View file

@@ -22,7 +22,6 @@
# Use the GRUB 2 boot loader.
boot.loader.grub.enable = true;
boot.loader.grub.version = 2;
# boot.loader.grub.efiSupport = true;
# boot.loader.grub.efiInstallAsRemovable = true;
# boot.loader.efi.efiSysMountPoint = "/boot/efi";

modules/deployment.nix Normal file
View file

@@ -0,0 +1,26 @@
{ config
, options
, lib
, ... }:
let
inherit (lib) mkOption types optionalString;
cfg = config.cj.deployment;
isDev = cfg.environment == "dev";
in
{
options.cj.deployment = {
environment = mkOption {
description = "Environment this host will be used for. Affects both colmena deploy groups and the baseDomain";
type = types.enum [ "dev" "prod" ];
};
};
config = {
_module.args = {
inherit isDev;
baseDomain = "${optionalString isDev "dev."}chaos.jetzt";
};
};
}
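
A hypothetical consumer of this module, for review context (the real hosts import it via the common module and only set the option, as in the goldberg and shirley hunks):

```nix
{ baseDomain, isDev, ... }: {
  imports = [ ../modules/deployment.nix ];
  cj.deployment.environment = "prod";
  # => isDev == false, baseDomain == "chaos.jetzt"
  # with "dev": isDev == true, baseDomain == "dev.chaos.jetzt"
}
```

The colmena defaults in flake.nix then pick up config.cj.deployment.environment for the deploy tag and config.networking.fqdn for targetHost.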

View file

@@ -1,10 +1,12 @@
{
pkgs,
config,
lib,
baseDomain,
...
}: let
{ pkgs
, config
, lib
, baseDomain
, isDev
, ...
}:
let
fpm_pool = "dokuwiki-${dw_domain}";
fpm_cfg = config.services.phpfpm.pools.${fpm_pool};
dw_domain = "wiki.${baseDomain}";
@@ -143,7 +145,7 @@ in {
};
plugin.oauthkeycloak = {
key = get_secret "dokuwiki/keycloak_key";
openidurl = "https://sso.chaos.jetzt/auth/realms/chaos-jetzt/.well-known/openid-configuration";
openidurl = "https://sso.chaos.jetzt/auth/realms/${if isDev then "dev" else "chaos-jetzt"}/.well-known/openid-configuration";
};
};

View file

@@ -4,7 +4,7 @@
outputs,
...
}: let
inherit (lib) concatStringsSep mapAttrsToList hasAttrByPath getAttrFromPath filterAttrs substring singleton optionalString optional;
inherit (lib) concatStringsSep mapAttrsToList getAttrFromPath filterAttrs singleton optional;
inherit (lib) escapeRegex;
inherit (config.networking) fqdn hostName;
@@ -12,11 +12,15 @@
# but which we'd like to include in the monitoring.
externalTargets = let
host = hostName: {
_module.args.baseDomain = "chaos.jetzt";
_module.args = {
isDev = false;
baseDomain = "chaos.jetzt";
};
config = {
networking = {
networking = rec {
inherit hostName;
domain = "net.chaos.jetzt";
fqdn = "${hostName}.${domain}";
};
services.prometheus = {
enable = true;
@@ -35,34 +39,33 @@
monDomain = "mon.${config.networking.domain}";
# deadnix: skip # Will be used as soon as we have two non-dev hosts
isMe = host: host.config.networking.fqdn == fqdn;
others = filterAttrs (_: !isMe) outputs.nixosConfigurations;
isDev = host: (substring 0 3 host._module.args.baseDomain) == "dev";
# deadnix: skip # Will be used as soon as we have two non-dev hosts
isDev_ = getAttrFromPath [ "_module" "args" "isDev" ];
allHosts = outputs.nixosConfigurations // externalTargets;
/*
Right now we only have one non-dev host in our NixOS setup (the ansible hosts don't monitor the NixOS hosts).
That's why we currently add all hosts to our little monitoring "cluster". As soon as we have two or more production hosts,
the dev host can be taken out of the equation
*/
# allTargets = filterAttrs (_: c: (isMe c) || !(isDev c)) allHosts;
# allTargets = filterAttrs (_: c: (isMe c) || !(isDev_ c)) allHosts;
allTargets = allHosts;
# monFqdn = config: "${config.networking.hostName}.${monDomain}";
hasEnabled = servicePath: config: let
path = servicePath ++ ["enable"];
monTarget = service: config: "${config.networking.hostName}.${monDomain}:${toString service.port}";
targetAllHosts = servicePath: let
service = cfg: getAttrFromPath servicePath cfg.config;
in
(hasAttrByPath path config) && (getAttrFromPath path config);
mapAttrsToList
(_: c: monTarget (service c) c.config)
(filterAttrs (_: c: (service c).enable or false) allTargets);
monTarget = servicePath: config: let
port = toString (getAttrFromPath (servicePath ++ ["port"]) config);
in "${config.networking.hostName}.${monDomain}:${port}";
dropMetrics = {wildcard ? true}: extraRegexen: let
dropMetrics = extraRegexen: let
dropRegexen = [ "go_" "promhttp_metric_handler_requests_" ] ++ extraRegexen;
in
singleton {
regex = "(${concatStringsSep "|" dropRegexen})${optionalString wildcard ".*"}";
regex = "(${concatStringsSep "|" dropRegexen}).*";
source_labels = ["__name__"];
action = "drop";
};
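
To illustrate the simplified dropMetrics signature, an assumed evaluation based on the default drop regexen above:

```nix
# Before: dropMetrics {} [ "alertmanager_http_" ]
# After:
metric_relabel_configs = dropMetrics [ "alertmanager_http_" ];
# which evaluates to:
# [ { regex = "(go_|promhttp_metric_handler_requests_|alertmanager_http_).*";
#     source_labels = [ "__name__" ]; action = "drop"; } ]
```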
@@ -75,10 +78,6 @@
prometheusPath = ["services" "prometheus"];
alertmanagerPath = ["services" "prometheus" "alertmanager"];
targetAllHosts = servicePath:
mapAttrsToList
(_: config: monTarget servicePath config.config)
(filterAttrs (_: c: (hasEnabled servicePath c.config)) (outputs.nixosConfigurations // externalTargets));
in {
/*
Steps to edit the monitoring.htpasswd (aka. adding yourself / updating your password):
@@ -155,7 +154,7 @@ in {
alertmanagers = [{
static_configs = [{
targets = [(monTarget alertmanagerPath config)];
targets = [(monTarget config.services.prometheus.alertmanager config)];
}];
}];
@@ -165,11 +164,11 @@ in {
static_configs = [{
targets = [
# Only scraping to own node-exporter
(monTarget ["services" "prometheus" "exporters" "node"] config)
(monTarget config.services.prometheus.exporters.node config)
];
}];
relabel_configs = [relabelInstance];
metric_relabel_configs = dropMetrics {} [];
metric_relabel_configs = dropMetrics [];
}
{
job_name = "alertmanager";
@@ -177,7 +176,7 @@ in {
targets = targetAllHosts alertmanagerPath;
}];
relabel_configs = [relabelInstance];
metric_relabel_configs = dropMetrics {} [
metric_relabel_configs = dropMetrics [
"alertmanager_http_(response_size_bytes|request_duration_seconds)_"
"alertmanager_notification_latency_seconds_"
"alertmanager_(nflog|cluster)_"
@@ -190,7 +189,7 @@ in {
targets = targetAllHosts prometheusPath;
}];
relabel_configs = [relabelInstance];
metric_relabel_configs = dropMetrics {} [
metric_relabel_configs = dropMetrics [
"prometheus_(sd|tsdb|target)_"
"prometheus_(engine_query|rule_evaluation)_duration_"
"prometheus_http_(response_size_bytes|request_duration_seconds)_"

View file

@@ -1,9 +1,13 @@
{ lib, config, pkgs, baseDomain, ... }:
{ lib
, config
, pkgs
, baseDomain
, isDev
, ... }:
let
vwDbUser = config.users.users.vaultwarden.name;
vwDbName = config.users.users.vaultwarden.name;
isDev = (builtins.substring 0 3 baseDomain) == "dev";
isDevStr = lib.optionalString isDev;
in {
sops.secrets = {

View file

@@ -1,4 +1,10 @@
{ lib, pkgs, config, baseDomain, ...}:
{ lib
, pkgs
, config
, baseDomain
, isDev
, ...}:
let
matrixWellKnown = {
client."m.homeserver".base_url = "https://matrix.${baseDomain}/";
@@ -6,7 +12,6 @@ let
};
toJSONFile = name: value: pkgs.writeText name (builtins.toJSON value);
matrixWellKnownDir = pkgs.linkFarm "matrix-well-known" (builtins.mapAttrs toJSONFile matrixWellKnown);
isDev = (builtins.substring 0 3 baseDomain) == "dev";
webroot = "${config.users.users."web-deploy".home}/public";
deployPubKey = if isDev then
"ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINRmpgMjXQCjA/YPNJvaNdKMjr0jnLtwKKbLCIisjeBw dev-deploykey@chaos.jetzt"