Compare commits

...

7 Commits

22 changed files with 665 additions and 48 deletions

43
deploy.sh Normal file

@@ -0,0 +1,43 @@
set -e
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl stop pgpool ; done
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl stop postgresql ; done
deploy() {
TARGET="$1"
shift
nixos-rebuild --flake ".#$TARGET" --target-host "$TARGET" switch --impure --use-remote-sudo "$@"
}
spread_token() {
#PW=$(
# ssh snorlax sudo cat /var/lib/rancher/k3s/server/node-token /etc/k3s.token | grep . | head -1 | grep . \
# || dd if=/dev/random bs=16 count=1 status=none | xxd -ps
#)
PW=$(
ssh snorlax sudo cat /etc/k3s.token | grep . | head -1 | grep . \
|| dd if=/dev/random bs=16 count=1 status=none | xxd -ps
)
for f in snorlax sobble rowlet;do
ssh $f "sudo bash -c 'touch /etc/k3s.token; chmod 600 /etc/k3s.token; dd of=/etc/k3s.token oflag=sync'" <<<"$PW"
done
}
k3s_reset() {
ssh $1 sudo /nix/store/*k3s*/bin/k3s-killall.sh || true
ssh $1 sudo rm -rf /var/lib/rancher/k3s /etc/rancher/k3s
}
#k3s_reset snorlax
#k3s_reset sobble
#k3s_reset rowlet
#nix run nixpkgs#natscli -- -s 172.16.1.2 kv del kine -f || true
#nix run nixpkgs#natscli -- -s 172.16.1.2 kv del k0-kine -f || true
#spread_token
#deploy snorlax
#spread_token
deploy snorlax "$@"
deploy sobble "$@"
deploy rowlet "$@"
#(PW=$(dd if=/dev/random bs=16 count=1 status=none | xxd -ps);for f in 172.16.1.{2,3};do ssh $f "sudo bash -c 'cat > /etc/pool_passwd'" <<<"$PW";done)
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl start postgresql ; done
#for f in 172.16.1.{2,3};do ssh -l james $f sudo systemctl start pgpool ; done

flake.lock

@@ -54,6 +54,24 @@
        "type": "github"
      }
    },
+    "flake-utils_2": {
+      "inputs": {
+        "systems": "systems_3"
+      },
+      "locked": {
+        "lastModified": 1710146030,
+        "narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
+        "type": "github"
+      },
+      "original": {
+        "owner": "numtide",
+        "repo": "flake-utils",
+        "type": "github"
+      }
+    },
    "interlude": {
      "inputs": {
        "flake-utils": "flake-utils",
@@ -90,11 +108,11 @@
    },
    "nixlib": {
      "locked": {
-        "lastModified": 1719708727,
-        "narHash": "sha256-XFNKtyirrGNdehpg7lMNm1skEcBApjqGhaHc/OI95HY=",
+        "lastModified": 1723942470,
+        "narHash": "sha256-QdSArN0xKESEOTcv+3kE6yu4B4WX9lupZ4+Htx3RXGg=",
        "owner": "nix-community",
        "repo": "nixpkgs.lib",
-        "rev": "1bba8a624b3b9d4f68db94fb63aaeb46039ce9e6",
+        "rev": "531a2e8416a6d8200a53eddfbdb8f2c8dc4a1251",
        "type": "github"
      },
      "original": {
@@ -111,11 +129,11 @@
        ]
      },
      "locked": {
-        "lastModified": 1720859326,
-        "narHash": "sha256-i8BiZj5faQS6gsupE0S9xtiyZmWinGpVLwxXWV342aQ=",
+        "lastModified": 1724028932,
+        "narHash": "sha256-U11ZiQPrpIBdv7oS23bNdX9GCxe/hPf/ARr64P2Wj1Y=",
        "owner": "nix-community",
        "repo": "nixos-generators",
-        "rev": "076ea5b672bb1ea535ee84cfdabd0c2f0b7f20c7",
+        "rev": "5fd22603892e4ec5ac6085058ed658243143aacd",
        "type": "github"
      },
      "original": {
@@ -157,11 +175,11 @@
    },
    "nixpkgs_3": {
      "locked": {
-        "lastModified": 1720954236,
-        "narHash": "sha256-1mEKHp4m9brvfQ0rjCca8P1WHpymK3TOr3v34ydv9bs=",
+        "lastModified": 1723938990,
+        "narHash": "sha256-9tUadhnZQbWIiYVXH8ncfGXGvkNq3Hag4RCBEMUk7MI=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "53e81e790209e41f0c1efa9ff26ff2fd7ab35e27",
+        "rev": "c42fcfbdfeae23e68fc520f9182dde9f38ad1890",
        "type": "github"
      },
      "original": {
@@ -185,17 +203,33 @@
        "type": "indirect"
      }
    },
+    "nixpkgs_5": {
+      "locked": {
+        "lastModified": 1721838734,
+        "narHash": "sha256-o87oh2nLDzZ1E9+j1I6GaEvd9865OWGYvxaPSiH9DEU=",
+        "owner": "NixOS",
+        "repo": "nixpkgs",
+        "rev": "1855c9961e0bfa2e776fa4b58b7d43149eeed431",
+        "type": "github"
+      },
+      "original": {
+        "owner": "NixOS",
+        "ref": "nixos-unstable-small",
+        "repo": "nixpkgs",
+        "type": "github"
+      }
+    },
    "numbers": {
      "inputs": {
        "ipcalc": "ipcalc",
        "nixpkgs": "nixpkgs_4"
      },
      "locked": {
-        "lastModified": 1721177469,
-        "narHash": "sha256-8puiNyCJy6k1Pl25BgE4wUUpifO7f1hraR7JI9lAqW4=",
+        "lastModified": 1724036520,
+        "narHash": "sha256-KJU6W5qghjMTjlTFnK0F2zJVw0qmTfC6nkMBhUNgjow=",
        "ref": "refs/heads/main",
-        "rev": "27af88462c971572a72a9a05c8608dca74e4a4b7",
-        "revCount": 13,
+        "rev": "4550d62254e030c9075343a4897a985fcfda1fd6",
+        "revCount": 29,
        "type": "git",
        "url": "https://git.strudelline.net/cascade/numbers"
      },
@@ -204,6 +238,25 @@
        "url": "https://git.strudelline.net/cascade/numbers"
      }
    },
+    "putex": {
+      "inputs": {
+        "flake-utils": "flake-utils_2",
+        "nixpkgs": "nixpkgs_5"
+      },
+      "locked": {
+        "lastModified": 1721923974,
+        "narHash": "sha256-yz3VioYJXUTdl4TU1RZnGbRMj3ng3OTtVDEbGPFXGLE=",
+        "ref": "refs/heads/main",
+        "rev": "eed14b5adada7325e916dfc3a89cbd4beef806a8",
+        "revCount": 7,
+        "type": "git",
+        "url": "https://git.strudelline.net/james/putex"
+      },
+      "original": {
+        "type": "git",
+        "url": "https://git.strudelline.net/james/putex"
+      }
+    },
    "root": {
      "inputs": {
        "deploy-rs": "deploy-rs",
@@ -211,6 +264,7 @@
        "nixos-generators": "nixos-generators",
        "nixpkgs": "nixpkgs_3",
        "numbers": "numbers",
+        "putex": "putex",
        "unstable": "unstable"
      }
    },
@@ -244,13 +298,28 @@
        "type": "github"
      }
    },
+    "systems_3": {
+      "locked": {
+        "lastModified": 1681028828,
+        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
+        "owner": "nix-systems",
+        "repo": "default",
+        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
+        "type": "github"
+      },
+      "original": {
+        "owner": "nix-systems",
+        "repo": "default",
+        "type": "github"
+      }
+    },
    "unstable": {
      "locked": {
-        "lastModified": 1721116560,
-        "narHash": "sha256-++TYlGMAJM1Q+0nMVaWBSEvEUjRs7ZGiNQOpqbQApCU=",
+        "lastModified": 1723985069,
+        "narHash": "sha256-MGtXhZHLZGKhtZT/MYXBJEuMkZB5DLYjY679EYNL7Es=",
        "owner": "NixOS",
        "repo": "nixpkgs",
-        "rev": "9355fa86e6f27422963132c2c9aeedb0fb963d93",
+        "rev": "ff1c2669bbb4d0dd9e62cc94f0968cfa652ceec1",
        "type": "github"
      },
      "original": {

flake.nix

@@ -4,18 +4,19 @@
    unstable.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
    numbers.url = "git+https://git.strudelline.net/cascade/numbers";
    interlude.url = "git+https://git.strudelline.net/nix/interlude";
+    putex.url = "git+https://git.strudelline.net/james/putex";
    nixos-generators = { url = "github:nix-community/nixos-generators"; inputs.nixpkgs.follows = "nixpkgs"; };
    deploy-rs.url = "github:serokell/deploy-rs";
  };
-  outputs = { self, nixpkgs, unstable, numbers, interlude, nixos-generators, deploy-rs }@inputs:
+  outputs = { self, nixpkgs, unstable, numbers, interlude, putex, nixos-generators, deploy-rs }@inputs:
  with builtins;
  with nixpkgs.lib;
  with interlude.lib;
  let
    includableModules =
      let localModules = "${./.}" + "/modules";
-          dirContents = readDir (traceVal localModules);
-          filenames = attrNames (trace "dirContents: ${toJSON dirContents}" dirContents);
+          dirContents = readDir (localModules);
+          filenames = attrNames (dirContents);
          dirs = (filter (n: dirContents."${n}" == "directory" &&
                    readFileType "${localModules}/${n}/default.nix" == "regular" ) filenames);
          files = concatMap (filterAndStripSuffix ".nix") (filter (n: dirContents."${n}" == "regular") filenames);
@@ -36,12 +37,13 @@
          self.nixosModules.vmFormats
          numbers.nixosModules.users
          self.nixosModules.session
-          ({...}: {
-            # fixed values.
-            networking.hostName = traceVal name;
-            system.stateVersion = "24.05";
-            nix.settings.require-sigs = false;
-          })
+          putex.nixosModules.default
+          {
+            # global fixed values.
+            networking.hostName = mkForce name;
+            system.stateVersion = mkForce "24.05";
+            nix.settings.require-sigs = mkForce false;
+          }
        ] ++ mods;
      });
  };
@@ -106,6 +108,5 @@
      [ includableModules ]
      ++ (with numbers.api; map (h: buildMachine h) deployableHosts)
      ++ [(buildMachine' "cascade-installer" [self.nixosModules.installer] {} )]
-      #++ [(buildMachine' "cascade-installer" [] {} )]
    );
}
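The switch from plain assignments to mkForce in the "global fixed values" block means those settings now override any other module that sets the same options at normal priority. A minimal sketch of what mkForce actually produces:

# mkForce wraps a value with priority 50; an ordinary assignment carries
# the default priority 100, and the lowest priority wins during module
# merging, so the forced value takes effect:
let lib = (import <nixpkgs> {}).lib;
in lib.mkForce "24.05"
# => { _type = "override"; content = "24.05"; priority = 50; }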

23
k3s_reset.sh Normal file

@@ -0,0 +1,23 @@
#!/bin/bash
for f in snorlax sobble rowlet;do
ssh $f sudo systemctl stop k3s || true
ssh $f sudo k3s-killall.sh || true
ssh $f sudo rm -rf /var/lib/rancher/k3s /etc/rancher/k3s
done
deploy() {
TARGET="$1"
nixos-rebuild --flake ".#$TARGET" --target-host "$TARGET" switch --impure --use-remote-sudo
}
deploy snorlax
TOKEN="$(ssh snorlax sudo cat /var/lib/rancher/k3s/server/node-token)"
echo "$TOKEN" | ssh sobble "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
echo "$TOKEN" | ssh rowlet "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
deploy sobble
deploy rowlet
import-k3s-creds.sh sobble k0 172.16.1.2
flux bootstrap gitea --hostname=git.strudelline.net --owner=cascade --repository=k0 --token-auth

7
mklocks.sh Normal file

@@ -0,0 +1,7 @@
#!/bin/bash
nats() {
command nix run nixpkgs#natscli -- -s 172.16.1.2 "$@"
}
nats stream add locks --defaults --discard-per-subject --subjects='lock.router' --storage=memory --discard=new --max-msgs-per-subject=1

6
modules/_tmpl.nix Normal file

@@ -0,0 +1,6 @@
{ config, pkgs, lib, ... }:
{
config = {
};
}


@@ -0,0 +1,33 @@
{ config, pkgs, lib, ... }:
{
config = {
systemd.services."cascade-router".unitConfig = {
Wants = [ "sys-subsystem-net-devices-wan0.device" ];
After = [ "sys-subsystem-net-devices-wan0.device" ];
};
services.putex.putexes = {
sec-router = {
start = "/run/current-system/sw/bin/systemctl --no-block start cascade-router.service";
stop = ''
/run/current-system/sw/bin/systemctl stop -f -s 9 cascade-router.service
'';
healthcheck = ''
set -e
cd /sys/class/net
# cat all carrier values we care about,
# filter out the ones that are 1
# if there's anything left, exit 1.
if (for f in wan0 sec0 lan0;do echo "$f $(cat "$f"/carrier)"; done|grep -v 1|grep -q .) ;then
exit 1
fi
exit 0
'';
};
};
virtualisation.libvirtd.allowedBridges = [ "sec0" "lan0" "wan0" ];
};
}
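The putex module's option interface isn't part of this diff; assuming each services.putex.putexes entry is a submodule taking start/stop/healthcheck command strings as used above, a hypothetical second entry would follow the same pattern:

# sec-dns and the unit it manages are illustrative only, not part of this repo
services.putex.putexes.sec-dns = {
  start = "/run/current-system/sw/bin/systemctl --no-block start coredns.service";
  stop = "/run/current-system/sw/bin/systemctl stop coredns.service";
  healthcheck = ''
    # healthy only while the sec0 link has carrier
    [ "$(cat /sys/class/net/sec0/carrier)" = "1" ]
  '';
};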


@@ -0,0 +1,32 @@
{ config, pkgs, lib, ... }:
{
config = {
#system.activationScripts."arpFilter" = ''
#PATH=${pkgs.procps}/bin:${pkgs.iptables}/bin:$PATH
# sysctl net.ipv4.conf.all.arp_filter=1
# sysctl net.ipv4.conf.default.arp_filter=1
#'';
environment.systemPackages = with pkgs; [
tcpdump
];
networking = {
nat = {
enable = true;
externalInterface = "wan0";
internalInterfaces = [ "lan0" "sec0" ];
};
useHostResolvConf = false;
useNetworkd = true;
useDHCP = false;
interfaces."wan0" = {
useDHCP = true;
#macAddress = "a0:ce:c8:c6:d2:5f";
};
};
system.stateVersion = "24.05";
};
}

5
modules/corenet-flux.yaml Normal file

@@ -0,0 +1,5 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: test-wow

91
modules/corenet.nix Normal file

@@ -0,0 +1,91 @@
{config, numbers, pkgs, lib, ...}:
with lib;
let
hasIface = iface: elem iface (numbers.api.hostIfaces config.networking.hostName);
strIfHasIface = iface: s: if hasIface iface then s else "";
attrsetIfHasIface = iface: as: if hasIface iface then as else {};
eltIfHasIface = iface: elt: if hasIface iface then [ elt ] else [];
nameservers = filter (x: x != "") [
"127.0.0.1"
(if config.networking.hostName != "snorlax" then (numbers.api.hostIface "snorlax" "sec0").ip else "")
(if config.networking.hostName != "sobble" then (numbers.api.hostIface "sobble" "sec0").ip else "")
(if config.networking.hostName != "rowlet" then (numbers.api.hostIface "rowlet" "sec0").ip else "")
];
in
{
imports = [
#./pgpool.nix
./udp514.nix
];
services.udp514-journal.enable = true;
services.coredns = {
enable = true;
config = ''
. {
${strIfHasIface "sxxxxec0" "bind sec0"}
${strIfHasIface "xxxxlan0" "bind lan0"}
nsid ${config.networking.hostName}
forward . 172.16.1.8
template IN A server.dns {
answer "{{ .Name }} 0 IN A ${(numbers.api.hostIface config.networking.hostName "sec0").ip}"
}
}
'';
};
services.resolved.enable = false;
#networking.resolvconf.enable = false;
environment.etc."resolv.conf".text = foldl'
(a: s: if s == "" then a else "${a}nameserver ${s}\n")
"" nameservers;
networking.nameservers = nameservers;
system.activationScripts."corenet-flux" = mkIf true ''
ln -sf ${./corenet-flux.yaml} /var/lib/rancher/k3s/server/manifests/corenet-flux.yaml
'';
services.k3s = {
enable = true;
tokenFile = mkIf (config.networking.hostName != "snorlax") "/etc/k3s.token";
serverAddr =
mkIf (config.networking.hostName != "snorlax")
"https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
clusterInit = config.networking.hostName == "snorlax";
extraFlags = (
#" --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2"+
" --disable=traefik"+
" --disable=local-storage"+
" --cluster-cidr=10.128.0.0/16"+
" --service-cidr=10.129.0.0/16"+
" --flannel-backend=vxlan"+
" --embedded-registry"+
(strIfHasIface "sec0" " --node-ip=${(numbers.api.hostIface config.networking.hostName "sec0").ip}")+
#(strIfHasIface "lan0" " --tls-san=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
"");
};
environment.etc."rancher/k3s/registries.yaml".text = ''
mirrors:
"*":
'';
networking.firewall.allowedUDPPorts = [
53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 8472 10250
];
networking.firewall.allowedUDPPortRanges = [
{ from = 5000; to = 32767; }
];
networking.firewall.allowedTCPPorts = [
53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 10250
];
networking.firewall.allowedTCPPortRanges = [
{ from = 5000; to = 32767; }
];
}
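As a reference for the conditional helpers at the top of this module, a minimal sketch (assuming numbers.api.hostIfaces returns interface names such as [ "sec0" "lan0" ]) of how they evaluate:

let
  lib = (import <nixpkgs> {}).lib;
  ifaces = [ "sec0" "lan0" ];                  # assumed hostIfaces result
  hasIface = iface: lib.elem iface ifaces;
  strIfHasIface = iface: s: if hasIface iface then s else "";
in {
  bound   = strIfHasIface "sec0" "bind sec0";  # -> "bind sec0"
  skipped = strIfHasIface "wan0" "bind wan0";  # -> "" (line drops out of the config)
}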


@@ -7,20 +7,20 @@
{
  environment.systemPackages = with pkgs; [
    seatd
-    emacs-nox
+    #emacs-nox
    inetutils
    unzip
    buildah
    curl
    vim
-    neovim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
+    neovim
    wget
    sshfs
    dig
    gost
    elinks
-    dislocker
-    ntfs3g
+    #dislocker
+    #ntfs3g
    kubectl
    sops
    git
@@ -32,6 +32,7 @@
    brightnessctl
    kubernetes-helm
    ripgrep
+    bridge-utils
    nettools
    psmisc

101
modules/pgpool.nix Normal file

@@ -0,0 +1,101 @@
{ config, pkgs, lib, ... }:
with lib;
let
cfg = config.services.pgpool;
shq = lib.escapeShellArg;
configFile = pkgs.writeText "pgpool.conf" cfg.config;
in
{
options = {
services.pgpool = {
enable = mkEnableOption "pgpool-II";
config = mkOption {
default = ''
backend_clustering_mode = 'snapshot_isolation'
backend_hostname0 = '127.0.0.1'
backend_port0 = 5432
backend_weight0 = 1
logging_collector = true
log_destination = 'syslog,stderr'
log_min_messages = 'INFO'
'';
example = ''
backend_clustering_mode = 'snapshot_isolation'
backend_hostname0 = '127.0.0.1'
backend_port0 = 5432
backend_weight0 = 1
logging_collector = true
log_destination = 'syslog,stderr'
log_min_messages = 'INFO'
'';
description = ''
Verbatim pgpool.conf to use
'';
};
user = mkOption {
type = types.str;
default = "pgpool";
description = ''
User account under which pgpool runs.
'';
};
group = mkOption {
type = types.str;
default = "pgpool";
description = ''
User group under which pgpool runs.
'';
};
package = mkPackageOption pkgs "pgpool" { };
extraArgs = mkOption {
default = [];
example = [ "-dns.port=53" ];
type = types.listOf types.str;
description = "Extra arguments to pass to coredns.";
};
};
};
config = mkIf cfg.enable {
users.users.${cfg.user} = {
isSystemUser = true;
group = cfg.group;
extraGroups = mkIf config.services.postgresql.enable [ "postgres" ];
};
users.groups.${cfg.group} = {};
environment.etc."pgpool.conf" = {
source = configFile;
};
environment.systemPackages = [ cfg.package ];
systemd.services.pgpool = {
description = "pgpool-II postgresql load balancer and replication manager";
after = [ "network.target" ];
wantedBy = [ "multi-user.target" ];
serviceConfig = {
PermissionsStartOnly = true;
LimitNPROC = 512;
LimitNOFILE = 1048576;
#CapabilityBoundingSet = "cap_net_bind_service";
#AmbientCapabilities = "cap_net_bind_service";
NoNewPrivileges = true;
User = cfg.user;
Group = cfg.group;
PIDFile = "/run/pgpool/pgpool.pid";
RuntimeDirectory = "pgpool";
ExecStart = "${getBin cfg.package}/bin/pgpool ${lib.escapeShellArgs cfg.extraArgs}";
ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
Restart = "no";
Type = "forking";
};
};
};
}
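A minimal sketch of consuming the new module from a host configuration; the second backend's address below is a placeholder, not taken from this repo:

{
  imports = [ ./modules/pgpool.nix ];
  services.pgpool = {
    enable = true;
    config = ''
      backend_clustering_mode = 'snapshot_isolation'
      backend_hostname0 = '127.0.0.1'
      backend_port0 = 5432
      backend_weight0 = 1
      backend_hostname1 = '172.16.1.3'   # placeholder second node
      backend_port1 = 5432
      backend_weight1 = 1
    '';
  };
}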


@@ -9,23 +9,6 @@
  virtualisation = {
    kvmgt.enable = true;
-    libvirtd = {
-      enable = true;
-      qemu = {
-        runAsRoot = true;
-        verbatimConfig = ''
-          cgroup_device_acl = ["/dev/kvmfr0", "/dev/kvm"]
-        '';
-        swtpm = {
-          enable = true;
-        };
-      };
-    };
-    docker = {
-      enable = true;
-      enableNvidia = false;
-    };
    containers = {
      enable = true;
      policy = {
@@ -39,6 +22,8 @@
    };
  };
+  #hardware.nvidia-container-toolkit.enable = true;
  services.openssh.enable = true;
  networking.firewall.enable = true;
@@ -50,4 +35,14 @@
  systemd.network.wait-online.enable = lib.mkDefault false;
  networking.useDHCP = false;
+  #services.tcsd.enable = true;
+  security.sudo = {
+    enable = true;
+    extraRules = [
+      { users = [ "%wheel" ];
+        commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ];
+      }
+    ];
+  };
}

4
modules/stateless-vm.nix Normal file

@@ -0,0 +1,4 @@
{
imports = [ ./vm.nix ];
config.virtualisation.diskImage = null;
}

33
modules/tank-nvme-luks.nix Normal file

@@ -0,0 +1,33 @@
{config,...}:
# to use this, you must have created the lvm devices for the host
# in this example, my hostname is sobble and the disk is /dev/sda:
#
# fdisk /dev/sda
# n # new partition, assuming this is a blank disk.
# # (enter for all defaults until you're back at the prompt)
# t # set type
# 1 # first partition, again assuming this was a blank disk
# 8e # lvm
# w # write and quit
#
# either make the lv inside an existing vg like the root luks one
# ----------
# lvcreate -L50G -n sobble-tank-nvme sobble-luks
# --- or ---
# pvcreate /dev/nvme0n2p15
# vgcreate sobble-tank-nvme /dev/nvme0n2p15
# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
# -- then --
# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
let
m = "${config.networking.hostName}-luks";
n = "${config.networking.hostName}-tank-nvme";
in
{
fileSystems."/tank/nvme" = {
device = "/dev/${m}/${n}";
fsType = "ext4";
};
}
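For a host named sobble, the let-bindings above resolve to m = "sobble-luks" and n = "sobble-tank-nvme", so this module reduces to:

{
  fileSystems."/tank/nvme" = {
    device = "/dev/sobble-luks/sobble-tank-nvme";
    fsType = "ext4";
  };
}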

26
modules/tank-nvme.nix Normal file

@@ -0,0 +1,26 @@
{config,...}:
# to use this, you must have created the lvm devices for the host
# in this example, my hostname is sobble and the disk is /dev/sda:
#
# fdisk /dev/sda
# n # new partition, assuming this is a blank disk.
# # (enter for all defaults until you're back at the prompt)
# t # set type
# 1 # first partition, again assuming this was a blank disk
# 8e # lvm
# w # write and quit
#
# pvcreate /dev/nvme5n7p9
# vgcreate sobble-tank-nvme /dev/nvme5n7p9
# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
let n = "${config.networking.hostName}-tank-nvme";
in
{
fileSystems."/tank/nvme" = {
device = "/dev/${n}/${n}";
fsType = "ext4";
};
}

28
modules/tank-ssd-luks.nix Normal file

@@ -0,0 +1,28 @@
{config,...}:
# to use this, you must have created the lvm devices for the host
# in this example, my hostname is sobble and the disk is /dev/sda:
#
# fdisk /dev/sda
# n # new partition, assuming this is a blank disk.
# # (enter for all defaults until you're back at the prompt)
# t # set type
# 1 # first partition, again assuming this was a blank disk
# 8e # lvm
# w # write and quit
#
# pvcreate /dev/sda1
# vgcreate sobble-tank-ssd /dev/sda1
# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
let
m = "${config.networking.hostName}-luks";
n = "${config.networking.hostName}-tank-ssd";
in
{
fileSystems."/tank/ssd" = {
device = "/dev/${m}/${n}";
fsType = "ext4";
};
}

26
modules/tank-ssd.nix Normal file

@@ -0,0 +1,26 @@
{config,...}:
# to use this, you must have created the lvm devices for the host
# in this example, my hostname is sobble and the disk is /dev/sda:
#
# fdisk /dev/sda
# n # new partition, assuming this is a blank disk.
# # (enter for all defaults until you're back at the prompt)
# t # set type
# 1 # first partition, again assuming this was a blank disk
# 8e # lvm
# w # write and quit
#
# pvcreate /dev/sda1
# vgcreate sobble-tank-ssd /dev/sda1
# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
let n = "${config.networking.hostName}-tank-ssd";
in
{
fileSystems."/tank/ssd" = {
device = "/dev/${n}/${n}";
fsType = "ext4";
};
}

19
modules/udp514-pkg.nix Normal file

@@ -0,0 +1,19 @@
{ pkgs ? import <nixpkgs> {}, ... }:
with pkgs;
stdenv.mkDerivation {
name = "udp514-journal";
src = fetchFromGitHub {
owner = "eworm-de"; repo = "udp514-journal"; rev = "main";
hash = "sha256-lk2Uz3OemhXd4MMR2zFi54XCQiGjibgvT1iz0a7R1j4=";
};
buildInputs = [ systemd ];
nativeBuildInputs = [ pkg-config multimarkdown ];
buildPhase = ''
make udp514-journal
'';
installPhase = ''
mkdir -p $out/bin
cp udp514-journal $out/bin/udp514-journal
'';
}
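Since pkgs defaults to import <nixpkgs> {}, the derivation can be evaluated standalone as well as through the module below; a sketch:

# from the repo root; builds the same derivation the module imports
let pkgs = import <nixpkgs> {};
in import ./modules/udp514-pkg.nix { inherit pkgs; }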

48
modules/udp514.nix Normal file

@@ -0,0 +1,48 @@
{ config, lib, pkgs, ... }:
with lib;
let udp514-journal = import ./udp514-pkg.nix { inherit pkgs; };
cfg = config.services.udp514-journal;
port = 514;
# not configurable yet.
# cfg.port;
in
{
options = {
services.udp514-journal = {
enable = mkEnableOption "udp514-journal";
openFirewall = mkOption {
default = true;
type = types.bool;
description = "Whether to open the firewall for the specified port.";
};
# this is apparently not configurable yet.
#port = mkOption {
# default = 514;
# type = types.port;
# description = "udp514-journal syslog ingest port";
#};
};
};
config = mkIf cfg.enable {
systemd.services."udp514-journal" = {
description = "udp514-journal syslog to journald adapter";
wantedBy = [ "multi-user.target" ];
serviceConfig = {
DynamicUser = true;
ProtectSystem = "full";
CapabilityBoundingSet = "cap_net_bind_service";
AmbientCapabilities = "cap_net_bind_service";
Type = "notify";
Restart = "always";
ExecStart = "${udp514-journal}/bin/udp514-journal";
ProtectHome = true;
PrivateDevices = true;
};
};
networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ port ];
};
}
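corenet.nix above already enables this service; for reference, a standalone sketch with the firewall option spelled out (it defaults to true):

{
  imports = [ ./modules/udp514.nix ];
  services.udp514-journal = {
    enable = true;
    openFirewall = true;  # opens 514/udp for syslog ingest
  };
}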

27
modules/vm.nix Normal file

@@ -0,0 +1,27 @@
{ config, lib, modulesPath, numbers, ... }:
with lib;
let
makeNic = { matchMac, iface, media, ... }:
# because of the bridge logic, br=iface _and_ internal-iface=iface
if media != "eth" then [] else [ "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}" ];
makeNicFromHostIface = host: iface: makeNic (numbers.api.hostIface host iface);
makeNics = host: concatMap (makeNicFromHostIface host) (numbers.api.hostIfaces host);
makeQemuNetworkingOptions = host:
(makeNics host) ++ [
# "-net nic,netdev=user.0,model=virtio"
# "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
];
in
{
imports = [
"${modulesPath}/virtualisation/qemu-vm.nix"
./server.nix
];
config = {
virtualisation.graphics = false;
virtualisation.qemu.networkingOptions = makeQemuNetworkingOptions config.networking.hostName;
};
}
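A sketch of what makeNic yields for a single ethernet interface record; the MAC value here is a placeholder:

let
  makeNic = { matchMac, iface, media, ... }:
    if media != "eth" then [ ]
    else [ "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}" ];
in
  makeNic { matchMac = "52:54:00:00:00:01"; iface = "sec0"; media = "eth"; }
  # => [ "-nic bridge,id=sec0,br=sec0,model=virtio,mac=52:54:00:00:00:01" ]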


@@ -7,7 +7,6 @@
{
  imports =
    [ # Include the results of the hardware scan.
-      #./hardware-configuration.nix
      ./lib/packages.nix
      ./lib/server.nix
      ./lib/session.nix