add corenet module and basic coredns + Corefile

James Andariese 2024-07-16 23:40:44 -05:00
parent cafee81d46
commit a3e0114083
11 changed files with 513 additions and 23 deletions

deploy.sh (new file, +43 lines)

@@ -0,0 +1,43 @@
set -e
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl stop pgpool ; done
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl stop postgresql ; done
deploy() {
TARGET="$1"
shift
nixos-rebuild --flake ".#$TARGET" --target-host "$TARGET" switch --impure --use-remote-sudo "$@"
}
spread_token() {
#PW=$(
# ssh snorlax sudo cat /var/lib/rancher/k3s/server/node-token /etc/k3s.token | grep . | head -1 | grep . \
# || dd if=/dev/random bs=16 count=1 status=none | xxd -ps
#)
PW=$(
ssh snorlax sudo cat /etc/k3s.token | grep . | head -1 | grep . \
|| dd if=/dev/random bs=16 count=1 status=none | xxd -ps
)
for f in snorlax sobble rowlet;do
ssh $f "sudo bash -c 'touch /etc/k3s.token; chmod 600 /etc/k3s.token; dd of=/etc/k3s.token oflag=sync'" <<<"$PW"
done
}
k3s_reset() {
ssh $1 sudo /nix/store/*k3s*/bin/k3s-killall.sh || true
ssh $1 sudo rm -rf /var/lib/rancher/k3s /etc/rancher/k3s
}
#k3s_reset snorlax
#k3s_reset sobble
#k3s_reset rowlet
#nix run nixpkgs#natscli -- -s 172.16.1.2 kv del kine -f || true
#nix run nixpkgs#natscli -- -s 172.16.1.2 kv del k0-kine -f || true
#spread_token
#deploy snorlax
spread_token
deploy snorlax "$@"
deploy sobble "$@"
deploy rowlet "$@"
#(PW=$(dd if=/dev/random bs=16 count=1 status=none | xxd -ps);for f in 172.16.1.{2,3};do ssh $f "sudo bash -c 'cat > /etc/pool_passwd'" <<<"$PW";done)
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl start postgresql ; done
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl start pgpool ; done
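The trailing "$@" forwards whatever arguments the script itself receives, so deploy snorlax "$@" expands to roughly nixos-rebuild --flake ".#snorlax" --target-host snorlax switch --impure --use-remote-sudo <extra args>, repeated for sobble and rowlet once spread_token has written the shared /etc/k3s.token to all three hosts.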

flake.lock (updated)

@@ -54,6 +54,24 @@
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_3"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"interlude": {
"inputs": {
"flake-utils": "flake-utils",
@@ -157,11 +175,11 @@
},
"nixpkgs_3": {
"locked": {
"lastModified": 1720954236,
"narHash": "sha256-1mEKHp4m9brvfQ0rjCca8P1WHpymK3TOr3v34ydv9bs=",
"lastModified": 1721686456,
"narHash": "sha256-nw/BnNzATDPfzpJVTnY8mcSKKsz6BJMEFRkJ332QSN0=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "53e81e790209e41f0c1efa9ff26ff2fd7ab35e27",
"rev": "575f3027caa1e291d24f1e9fb0e3a19c2f26d96b",
"type": "github"
},
"original": {
@@ -185,17 +203,33 @@
"type": "indirect"
}
},
"nixpkgs_5": {
"locked": {
"lastModified": 1721838734,
"narHash": "sha256-o87oh2nLDzZ1E9+j1I6GaEvd9865OWGYvxaPSiH9DEU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "1855c9961e0bfa2e776fa4b58b7d43149eeed431",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable-small",
"repo": "nixpkgs",
"type": "github"
}
},
"numbers": {
"inputs": {
"ipcalc": "ipcalc",
"nixpkgs": "nixpkgs_4"
},
"locked": {
"lastModified": 1721177469,
"narHash": "sha256-8puiNyCJy6k1Pl25BgE4wUUpifO7f1hraR7JI9lAqW4=",
"lastModified": 1721622955,
"narHash": "sha256-p151jyfsLa+hCsinD6RY0XIlcag1+8ftLBvmXwQIVzI=",
"ref": "refs/heads/main",
"rev": "27af88462c971572a72a9a05c8608dca74e4a4b7",
"revCount": 13,
"rev": "f35ee655ea65eda94729cfd182713d9ae0d04ab8",
"revCount": 22,
"type": "git",
"url": "https://git.strudelline.net/cascade/numbers"
},
@@ -204,6 +238,25 @@
"url": "https://git.strudelline.net/cascade/numbers"
}
},
"putex": {
"inputs": {
"flake-utils": "flake-utils_2",
"nixpkgs": "nixpkgs_5"
},
"locked": {
"lastModified": 1721862828,
"narHash": "sha256-QU3C+8DY9w+q+kmoAmRWMw96pRwrjywh/ru0n/eKs04=",
"ref": "refs/heads/main",
"rev": "31277b2e35bf4329164a927ff612198523a5c2ac",
"revCount": 6,
"type": "git",
"url": "https://git.strudelline.net/james/putex"
},
"original": {
"type": "git",
"url": "https://git.strudelline.net/james/putex"
}
},
"root": {
"inputs": {
"deploy-rs": "deploy-rs",
@@ -211,6 +264,7 @@
"nixos-generators": "nixos-generators",
"nixpkgs": "nixpkgs_3",
"numbers": "numbers",
"putex": "putex",
"unstable": "unstable"
}
},
@@ -244,13 +298,28 @@
"type": "github"
}
},
"systems_3": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"unstable": {
"locked": {
"lastModified": 1721116560,
"narHash": "sha256-++TYlGMAJM1Q+0nMVaWBSEvEUjRs7ZGiNQOpqbQApCU=",
"lastModified": 1721782431,
"narHash": "sha256-UNDpwjYxNXQet/g3mgRLsQ9zxrbm9j2JEvP4ijF3AWs=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9355fa86e6f27422963132c2c9aeedb0fb963d93",
"rev": "4f02464258baaf54992debfd010a7a3662a25536",
"type": "github"
},
"original": {

flake.nix (updated)

@@ -4,18 +4,19 @@
unstable.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
numbers.url = "git+https://git.strudelline.net/cascade/numbers";
interlude.url = "git+https://git.strudelline.net/nix/interlude";
putex.url = "git+https://git.strudelline.net/james/putex";
nixos-generators = { url = "github:nix-community/nixos-generators"; inputs.nixpkgs.follows = "nixpkgs"; };
deploy-rs.url = "github:serokell/deploy-rs";
};
outputs = { self, nixpkgs, unstable, numbers, interlude, nixos-generators, deploy-rs }@inputs:
outputs = { self, nixpkgs, unstable, numbers, interlude, putex, nixos-generators, deploy-rs }@inputs:
with builtins;
with nixpkgs.lib;
with interlude.lib;
let
includableModules =
let localModules = "${./.}" + "/modules";
dirContents = readDir (traceVal localModules);
filenames = attrNames (trace "dirContents: ${toJSON dirContents}" dirContents);
dirContents = readDir (localModules);
filenames = attrNames (dirContents);
dirs = (filter (n: dirContents."${n}" == "directory" &&
readFileType "${localModules}/${n}/default.nix" == "regular" ) filenames);
files = concatMap (filterAndStripSuffix ".nix") (filter (n: dirContents."${n}" == "regular") filenames);
@@ -36,9 +37,10 @@
self.nixosModules.vmFormats
numbers.nixosModules.users
self.nixosModules.session
putex.nixosModules.default
({...}: {
# fixed values.
networking.hostName = traceVal name;
networking.hostName = name;
system.stateVersion = "24.05";
nix.settings.require-sigs = false;
})
@@ -106,6 +108,5 @@
[ includableModules ]
++ (with numbers.api; map (h: buildMachine h) deployableHosts)
++ [(buildMachine' "cascade-installer" [self.nixosModules.installer] {} )]
#++ [(buildMachine' "cascade-installer" [] {} )]
);
}
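For orientation, the directory scan above can be reproduced in nix repl from the repo root; builtins.replaceStrings stands in for interlude's filterAndStripSuffix (which also drops names that do not end in .nix), a helper this diff does not show:

let
  # what includableModules starts from: filename -> file type
  dirContents = builtins.readDir ./modules;
  # keep plain .nix files; directories with a default.nix are handled separately above
  files = builtins.filter (n: dirContents.${n} == "regular") (builtins.attrNames dirContents);
in map (builtins.replaceStrings [ ".nix" ] [ "" ]) files
# => [ "_tmpl" "corenet" "pgpool" ... ] plus the pre-existing module files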

mklocks.sh (new file, +7 lines)

@@ -0,0 +1,7 @@
#!/bin/bash
nats() {
command nix run nixpkgs#natscli -- -s 172.16.1.2 "$@"
}
nats stream add locks --defaults --discard-per-subject --subjects='lock.router' --storage=memory --discard=new --max-msgs-per-subject=1
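Because the stream is created with --max-msgs-per-subject=1, --discard=new and --discard-per-subject, a subject such as lock.router holds at most one message and further publishes to it are rejected until that message is removed, which is the usual JetStream mutex pattern; nats stream info locks shows the current state. Presumably this is the lock contended for by the putex-managed services added in this commit, though that wiring is not visible here.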

modules/_tmpl.nix (new file, +6 lines)

@@ -0,0 +1,6 @@
{ config, pkgs, lib, ... }:
{
config = {
};
}

View File

@@ -0,0 +1,63 @@
{ config, pkgs, lib, ... }:
{
config = {
#containers.wan-router = {
# privateNetwork = true;
# extraVeths.crwan0 = {
# hostBridge = "lan0";
# };
# extraVeths.crlan0 = {
# hostBridge = "lan0";
# localAddress = "172.16.1.111";
# };
#};
systemd.services."container@sec-router".unitConfig = {
Wants = [ "sys-subsystem-net-devices-wan0.device" ];
After = [ "sys-subsystem-net-devices-wan0.device" ];
};
#containers.sec-router = {
# autoStart = false;
# restartIfChanged = true;
# ephemeral = true;
# privateNetwork = true;
# macvlans = [ "phy4:wan0" ];
# extraVeths.scrsec0 = {
# hostBridge = "sec0";
# localAddress = "10.127.1.254/24";
# };
# extraVeths.scrlan0 = {
# hostBridge = "lan0";
# localAddress = "172.16.1.254/12";
# };
# config = {
# system.activationScripts."arpFilter" = ''
# sysctl "net.ipv4.conf.all.arp_filter"=1
# sysctl "net.ipv4.conf.default.arp_filter"=1
# '';
# networking = {
# useHostResolvConf = false;
# useNetworkd = true;
# useDHCP = false;
# interfaces."wan0" = {
# useDHCP = true;
# macAddress = "a0:ce:c8:c6:d2:5f";
# };
# };
# system.stateVersion = "24.05";
# };
#};
services.putex.putexes = {
sec-router = {
start = "/run/current-system/sw/bin/systemctl --no-block start container@sec-router.service";
stop = ''
/run/current-system/sw/bin/systemctl stop -f -s 9 container@sec-router.service
'';
};
};
};
}
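For context, an assumption rather than something shown in this diff: the putex module (the new flake input) is read here as a mutual-exclusion runner, so only the host currently holding the corresponding lock issues the start command for container@sec-router.service while the other hosts keep it stopped.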

modules/corenet.nix (new file, +126 lines)

@@ -0,0 +1,126 @@
{config, numbers, pkgs, lib, ...}:
with lib;
let
hasIface = iface: elem iface (numbers.api.hostIfaces config.networking.hostName);
strIfHasIface = iface: s: if hasIface iface then s else "";
attrsetIfHasIface = iface: as: if hasIface iface then as else {};
eltIfHasIface = iface: elt: if hasIface iface then [ elt ] else [];
in
{
imports = [
#./pgpool.nix
./udp514.nix
];
services.udp514-journal.enable = true;
services.coredns = {
enable = true;
config = ''
. {
${strIfHasIface "sec0" "bind sec0"}
${strIfHasIface "lan0" "bind lan0"}
whoami
}
'';
};
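# For illustration, on a host that has both sec0 and lan0 (per numbers.api) the
# Corefile above renders to roughly:
#   . {
#     bind sec0
#     bind lan0
#     whoami
#   }
# A host missing an interface simply gets a blank line instead of the bind.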
#services.postgresql = {
# enable = true;
# dataDir = "/srv/pgdata";
# settings = {
# default_transaction_isolation = "repeatable read";
# };
# authentication = ''
# host all all 10.127.1.2/29 trust
# '';
# enableTCPIP = true;
#};
#systemd.tmpfiles.rules = [
# "d /srv/pgdata 775 postgres postgres -"
#];
#services.pgpool = {
# enable = true;
# config = ''
# backend_clustering_mode = 'snapshot_isolation'
# backend_hostname0 = '10.127.1.2'
# backend_port0 = 5432
# backend_weight0 = 1
# backend_data_directory0 = '/srv/pgdata'
# backend_flag0 = ALLOW_TO_FAILOVER
# backend_hostname1 = '10.127.1.3'
# backend_port1 = 5432
# backend_weight1 = 1
# backend_data_directory1 = '/srv/pgdata'
# backend_flag1 = ALLOW_TO_FAILOVER
# listen_address = '*'
# logging_collector = true
# log_destination = 'syslog,stderr'
# log_min_messages = 'INFO'
# '';
#};
services.k3s = {
enable = true;
tokenFile = "/etc/k3s.token";
#serverAddr =
# mkIf (config.networking.hostName != "snorlax")
# "https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
#clusterInit = config.networking.hostName == "snorlax";
extraFlags = (
" --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2,nats://10.127.1.2:4222,nats://10.127.1.3:4222,nats://10.127.1.4:4222"+
" --disable=traefik"+
" --disable=local-storage"+
" --cluster-cidr=10.128.0.0/16"+
" --flannel-backend=host-gw"+
(strIfHasIface "sec0" " --node-ip=${(numbers.api.hostIface config.networking.hostName "sec0").ip}")+
(strIfHasIface "lan0" " --node-external-ip=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
"");
#"--node-ip=${config.systemd.network
};
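# For illustration, assuming a node whose sec0 address is 10.127.1.2 (the real
# addresses come from numbers.api), extraFlags renders to roughly:
#   --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2,nats://10.127.1.2:4222,nats://10.127.1.3:4222,nats://10.127.1.4:4222
#   --disable=traefik --disable=local-storage
#   --cluster-cidr=10.128.0.0/16 --flannel-backend=host-gw
#   --node-ip=10.127.1.2 (plus --node-external-ip=<lan0 ip> when the host has lan0)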
systemd.services.nats-datadir = {
requiredBy = [ "nats.service" ];
before = [ "nats.service" ];
serviceConfig = {
Type = "oneshot";
ExecStart = pkgs.writeScript "nats-datadir" ''
#!${pkgs.bash}/bin/bash
${pkgs.coreutils}/bin/mkdir -p /srv/nats
${pkgs.coreutils}/bin/chown -R nats:nats /srv/nats
${pkgs.coreutils}/bin/chmod 750 /srv/nats
'';
};
};
systemd.services.nats.unitConfig.Requires = [ "systemd-tmpfiles-resetup.service" ];
systemd.services.nats.unitConfig.After = [ "systemd-tmpfiles-resetup.service" ];
services.nats = {
enable = true;
serverName = config.networking.hostName;
dataDir = "/srv/nats";
jetstream = true;
settings = {
cluster = {
name = "cascade";
no_advertise = true;
port = 6222;
routes = [
"nats://10.127.1.2:6222"
"nats://10.127.1.3:6222"
"nats://10.127.1.4:6222"
];
};
http_port = 8222;
};
};
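# Port summary for the firewall rules below: 53 coredns, 5432 postgresql and
# 9999/9898 pgpool (both still commented out above), 6443 k3s API, 4222 NATS
# clients, 6222 NATS cluster routes, 8222 NATS HTTP monitoring; for example,
# curl http://10.127.1.2:8222/routez on any node should list the other two routes.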
networking.firewall.allowedUDPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
networking.firewall.allowedTCPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
}

modules/pgpool.nix (new file, +101 lines)

@@ -0,0 +1,101 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.pgpool;
shq = lib.escapeShellArg;
configFile = pkgs.writeText "pgpool.conf" cfg.config;
in
{
options = {
services.pgpool = {
enable = mkEnableOption "pgpool-II";
config = mkOption {
default = ''
backend_clustering_mode = 'snapshot_isolation'
backend_hostname0 = '127.0.0.1'
backend_port0 = 5432
backend_weight0 = 1
logging_collector = true
log_destination = 'syslog,stderr'
log_min_messages = 'INFO'
'';
example = ''
backend_clustering_mode = 'snapshot_isolation'
backend_hostname0 = '127.0.0.1'
backend_port0 = 5432
backend_weight0 = 1
logging_collector = true
log_destination = 'syslog,stderr'
log_min_messages = 'INFO'
'';
description = ''
Verbatim pgpool.conf to use
'';
};
user = mkOption {
type = types.str;
default = "pgpool";
description = ''
User account under which pgpool runs.
'';
};
group = mkOption {
type = types.str;
default = "pgpool";
description = ''
User group under which pgpool runs.
'';
};
package = mkPackageOption pkgs "pgpool" { };
extraArgs = mkOption {
default = [];
example = [ "-d" ];
type = types.listOf types.str;
description = "Extra arguments to pass to pgpool.";
};
};
};
config = mkIf cfg.enable {
users.users.${cfg.user} = {
isSystemUser = true;
group = cfg.group;
extraGroups = mkIf config.services.postgresql.enable [ "postgres" ];
};
users.groups.${cfg.group} = {};
environment.etc."pgpool.conf" = {
source = configFile;
};
environment.systemPackages = [ cfg.package ];
systemd.services.pgpool = {
description = "pgpool-II postgresql load balancer and replication manager";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
PermissionsStartOnly = true;
LimitNPROC = 512;
LimitNOFILE = 1048576;
#CapabilityBoundingSet = "cap_net_bind_service";
#AmbientCapabilities = "cap_net_bind_service";
NoNewPrivileges = true;
User = cfg.user;
Group = cfg.group;
PIDFile = "/run/pgpool/pgpool.pid";
RuntimeDirectory = "pgpool";
ExecStart = "${getBin cfg.package}/bin/pgpool ${lib.escapeShellArgs cfg.extraArgs}";
ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
Restart = "no";
Type = "forking";
};
};
};
}
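A minimal usage sketch, mirroring the commented-out block in modules/corenet.nix; the backend address is a placeholder:

{
  services.pgpool = {
    enable = true;
    config = ''
      backend_clustering_mode = 'snapshot_isolation'
      backend_hostname0 = '10.127.1.2'
      backend_port0 = 5432
      backend_weight0 = 1
      logging_collector = true
      log_destination = 'syslog,stderr'
    '';
  };
}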

View File

@@ -21,11 +21,6 @@
};
};
};
docker = {
enable = true;
enableNvidia = false;
};
containers = {
enable = true;
policy = {
@@ -39,6 +34,8 @@
};
};
hardware.nvidia-container-toolkit.enable = true;
services.openssh.enable = true;
networking.firewall.enable = true;
@@ -50,4 +47,14 @@
systemd.network.wait-online.enable = lib.mkDefault false;
networking.useDHCP = false;
#services.tcsd.enable = true;
security.sudo = {
enable = true;
extraRules = [
{ users = [ "%wheel" ];
commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ];
}
];
};
}
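The %wheel NOPASSWD rule is also what lets deploy.sh's nixos-rebuild ... --use-remote-sudo calls activate the new configuration on each target without a password prompt.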

modules/udp514-pkg.nix (new file, +19 lines)

@@ -0,0 +1,19 @@
{ pkgs ? import <nixpkgs> {}, ... }:
with pkgs;
stdenv.mkDerivation {
name = "udp514-journal";
src = fetchFromGitHub {
owner = "eworm-de"; repo = "udp514-journal"; rev = "main";
hash = "sha256-lk2Uz3OemhXd4MMR2zFi54XCQiGjibgvT1iz0a7R1j4=";
};
buildInputs = [ systemd ];
nativeBuildInputs = [ pkg-config multimarkdown ];
buildPhase = ''
make udp514-journal
'';
installPhase = ''
mkdir -p $out/bin
cp udp514-journal $out/bin/udp514-journal
'';
}
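Because pkgs defaults to import <nixpkgs> {}, the derivation can be built standalone with nix-build modules/udp514-pkg.nix as a quick check; note that fetching rev = "main" together with a fixed hash will stop building once the upstream branch moves past the commit that hash describes.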

modules/udp514.nix (new file, +48 lines)

@@ -0,0 +1,48 @@
{ config, lib, pkgs, ... }:
with lib;
let udp514-journal = import ./udp514-pkg.nix { inherit pkgs; };
cfg = config.services.udp514-journal;
port = 514;
# not configurable yet.
# cfg.port;
in
{
options = {
services.udp514-journal = {
enable = mkEnableOption "udp514-journal";
openFirewall = mkOption {
default = true;
type = types.bool;
description = "Whether to open the firewall for the specified port.";
};
# this is apparently not configurable yet.
#port = mkOption {
# default = 514;
# type = types.port;
# description = "udp514-journal syslog ingest port";
#};
};
};
config = mkIf cfg.enable {
systemd.services."udp514-journal" = {
description = "udp514-journal syslog to journald adapter";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
ProtectSystem = "full";
CapabilityBoundingSet = "cap_net_bind_service";
AmbientCapabilities = "cap_net_bind_service";
Type = "notify";
Restart = "always";
ExecStart = "${udp514-journal}/bin/udp514-journal";
ProtectHome = true;
PrivateDevices = true;
};
};
networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ port ];
};
}
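With the service enabled (as modules/corenet.nix does), ingestion can be verified from another machine with something like logger -d -n <host> -P 514 "test" (util-linux logger in UDP mode) and journalctl -f on the receiving node; this is a suggested smoke test, not part of the module.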