James Andariese 2024-07-26 14:29:08 -05:00
parent 97a0c9035f
commit ec44cba36d
13 changed files with 224 additions and 122 deletions

File: (name not shown; a deploy script containing k3s_reset())

@@ -33,7 +33,7 @@ k3s_reset() {
 #spread_token
 #deploy snorlax
-spread_token
+#spread_token
 deploy snorlax "$@"
 deploy sobble "$@"
 deploy rowlet "$@"

File: (name not shown; evidently flake.lock)

@@ -175,11 +175,11 @@
     },
     "nixpkgs_3": {
       "locked": {
-        "lastModified": 1721821769,
-        "narHash": "sha256-PhmkdTJs2SfqKzSyDB74rDKp1MH4mGk0pG/+WqrnGEw=",
+        "lastModified": 1722087241,
+        "narHash": "sha256-2ShmEaFi0kJVOEEu5gmlykN5dwjWYWYUJmlRTvZQRpU=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "d0907b75146a0ccc1ec0d6c3db287ec287588ef6",
+        "rev": "8c50662509100d53229d4be607f1a3a31157fa12",
         "type": "github"
       },
       "original": {
@@ -225,11 +225,11 @@
       "nixpkgs": "nixpkgs_4"
     },
     "locked": {
-      "lastModified": 1721931394,
-      "narHash": "sha256-LetDlT8SYpcDZURvkHW7OsVzE0QvmVWv+HIbwYsA0Ac=",
+      "lastModified": 1722138515,
+      "narHash": "sha256-8iQj7YvgFSStr3HH4PYm0ofrflS+74BxesKMUdtFhnw=",
       "ref": "refs/heads/main",
-      "rev": "16f8054106f73b8cf21ded014ffa42fb4fe47947",
-      "revCount": 24,
+      "rev": "b717678d0f964ede087b5bef49bc4ec7ffa1d8d8",
+      "revCount": 28,
       "type": "git",
       "url": "https://git.strudelline.net/cascade/numbers"
     },
@@ -315,11 +315,11 @@
   },
   "unstable": {
     "locked": {
-      "lastModified": 1721782431,
-      "narHash": "sha256-UNDpwjYxNXQet/g3mgRLsQ9zxrbm9j2JEvP4ijF3AWs=",
+      "lastModified": 1722073938,
+      "narHash": "sha256-OpX0StkL8vpXyWOGUD6G+MA26wAXK6SpT94kLJXo6B4=",
       "owner": "NixOS",
       "repo": "nixpkgs",
-      "rev": "4f02464258baaf54992debfd010a7a3662a25536",
+      "rev": "e36e9f57337d0ff0cf77aceb58af4c805472bfae",
       "type": "github"
     },
     "original": {

File: k3s_reset.sh (new file, 23 lines)

@@ -0,0 +1,23 @@
+#!/bin/bash
+for f in snorlax sobble rowlet;do
+  ssh $f sudo systemctl stop k3s || true
+  ssh $f sudo k3s-killall.sh || true
+  ssh $f sudo rm -rf /var/lib/rancher/k3s /etc/rancher/k3s
+done
+deploy() {
+  TARGET="$1"
+  nixos-rebuild --flake ".#$TARGET" --target-host "$TARGET" switch --impure --use-remote-sudo
+}
+deploy snorlax
+TOKEN="$(ssh snorlax sudo cat /var/lib/rancher/k3s/server/node-token)"
+echo "$TOKEN" | ssh sobble "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
+echo "$TOKEN" | ssh rowlet "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
+deploy sobble
+deploy rowlet
+import-k3s-creds.sh sobble k0 172.16.1.2
+flux bootstrap gitea --hostname=git.strudelline.net --owner=cascade --repository=k0 --token-auth
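Note: a hedged sketch of running this end to end; the check assumes SSH and sudo work as in the script itself, and uses the kubectl that k3s bundles on the init server:

    ./k3s_reset.sh
    # snorlax is the clusterInit server; all three nodes should
    # eventually report Ready once the token has been spread:
    ssh snorlax sudo k3s kubectl get nodes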

File: (name not shown; a libvirt host module)

@@ -27,5 +27,7 @@
       '';
     };
   };
+  virtualisation.libvirtd.allowedBridges = [ "sec0" "lan0" "wan0" ];
 };
}
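Note: allowedBridges is the ACL consumed by qemu-bridge-helper; NixOS renders it into /etc/qemu/bridge.conf. A sketch of the expected result after a rebuild (one allow line per bridge, inferred from the option's documented behavior):

    cat /etc/qemu/bridge.conf
    # allow sec0
    # allow lan0
    # allow wan0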

File: corenet-flux.yaml (new file; referenced below by the k3s module's activation script)

@@ -0,0 +1,5 @@
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: test-wow
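Note: k3s auto-applies manifests placed in /var/lib/rancher/k3s/server/manifests (the k3s module below symlinks this file there), so once reconciled the namespace should exist; a quick check:

    ssh snorlax sudo k3s kubectl get namespace test-wow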

File: (name not shown; the shared k3s/CoreDNS/NATS module)

@@ -8,6 +8,13 @@ strIfHasIface = iface: s: if hasIface iface then s else "";
 attrsetIfHasIface = iface: as: if hasIface iface then as else {};
 eltIfHasIface = iface: elt: if hasIface iface then [ elt ] else [];
+nameservers = filter (x: x != "") [
+  "127.0.0.1"
+  (if config.networking.hostName != "snorlax" then (numbers.api.hostIface "snorlax" "sec0").ip else "")
+  (if config.networking.hostName != "sobble" then (numbers.api.hostIface "sobble" "sec0").ip else "")
+  (if config.networking.hostName != "rowlet" then (numbers.api.hostIface "rowlet" "sec0").ip else "")
+];
 in
 {
@@ -21,106 +28,60 @@ in
     enable = true;
     config = ''
       . {
-        ${strIfHasIface "sec0" "bind sec0"}
-        ${strIfHasIface "lan0" "bind lan0"}
+        ${strIfHasIface "sxxxxec0" "bind sec0"}
+        ${strIfHasIface "xxxxlan0" "bind lan0"}
         forward . 172.16.1.8
       }
     '';
   };
   services.resolved.enable = false;
-  #networking.resolvconf.enable = false;
-  #services.postgresql = {
-  #  enable = true;
-  #  dataDir = "/srv/pgdata";
-  #  settings = {
-  #    default_transaction_isolation = "repeatable read";
-  #  };
-  #  authentication = ''
-  #    host all all 10.127.1.2/29 trust
-  #  '';
-  #  enableTCPIP = true;
-  #};
+  environment.etc."resolv.conf".text = foldl'
+    (a: s: if s == "" then a else "${a}nameserver ${s}\n")
+    "" nameservers;
+  networking.nameservers = nameservers;
-  #systemd.tmpfiles.rules = [
-  #  "d /srv/pgdata 775 postgres postgres -"
-  #];
-  #services.pgpool = {
-  #  enable = true;
-  #  config = ''
-  #    backend_clustering_mode = 'snapshot_isolation'
-  #    backend_hostname0 = '10.127.1.2'
-  #    backend_port0 = 5432
-  #    backend_weight0 = 1
-  #    backend_data_directory0 = '/srv/pgdata'
-  #    backend_flag0 = ALLOW_TO_FAILOVER
-  #    backend_hostname1 = '10.127.1.3'
-  #    backend_port1 = 5432
-  #    backend_weight1 = 1
-  #    backend_data_directory1 = '/srv/pgdata'
-  #    backend_flag1 = ALLOW_TO_FAILOVER
-  #    listen_address = '*'
-  #    logging_collector = true
-  #    log_destination = 'syslog,stderr'
-  #    log_min_messages = 'INFO'
-  #  '';
-  #};
+  system.activationScripts."corenet-flux" = mkIf true ''
+    ln -sf ${./corenet-flux.yaml} /var/lib/rancher/k3s/server/manifests/corenet-flux.yaml
+  '';
   services.k3s = {
     enable = true;
-    tokenFile = "/etc/k3s.token";
-    #serverAddr =
-    #  mkIf (config.networking.hostName != "snorlax")
-    #  "https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
-    #clusterInit = config.networking.hostName == "snorlax";
+    tokenFile = mkIf (config.networking.hostName != "snorlax") "/etc/k3s.token";
+    serverAddr =
+      mkIf (config.networking.hostName != "snorlax")
+      "https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
+    clusterInit = config.networking.hostName == "snorlax";
     extraFlags = (
-      " --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2,nats://10.127.1.2:4222,nats://10.127.1.3:4222,nats://10.127.1.4:4222"+
+      #" --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2"+
       " --disable=traefik"+
      " --disable=local-storage"+
      " --cluster-cidr=10.128.0.0/16"+
-      " --flannel-backend=host-gw"+
+      " --service-cidr=10.129.0.0/16"+
+      " --flannel-backend=vxlan"+
      " --embedded-registry"+
      (strIfHasIface "sec0" " --node-ip=${(numbers.api.hostIface config.networking.hostName "sec0").ip}")+
      (strIfHasIface "lan0" " --node-external-ip=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
      #(strIfHasIface "lan0" " --tls-san=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
      "");
      #"--node-ip=${config.systemd.network
   };
   systemd.services.nats-datadir = {
     requiredBy = [ "nats.service" ];
     before = [ "nats.service" ];
     serviceConfig = {
       Type = "oneshot";
       ExecStart = pkgs.writeScript "nats-datadir" ''
         #!${pkgs.bash}/bin/bash
         ${pkgs.coreutils}/bin/mkdir -p /srv/nats
         ${pkgs.coreutils}/bin/chown -R nats:nats /srv/nats
         ${pkgs.coreutils}/bin/chmod 750 /srv/nats
       '';
     };
   };
   environment.etc."rancher/k3s/registries.yaml".text = ''
     mirrors:
       "*":
   '';
   systemd.services.nats.unitConfig.Requires = [ "systemd-tmpfiles-resetup.service" ];
   systemd.services.nats.unitConfig.After = [ "systemd-tmpfiles-resetup.service" ];
   services.nats = {
     enable = true;
     serverName = config.networking.hostName;
     dataDir = "/srv/nats";
     jetstream = true;
     settings = {
       cluster = {
         name = "cascade";
         no_advertise = true;
         port = 6222;
         routes = [
           "nats://10.127.1.2:6222"
           "nats://10.127.1.3:6222"
           "nats://10.127.1.4:6222"
         ];
       };
       http_port = 8222;
     };
   };
-  networking.firewall.allowedUDPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
-  networking.firewall.allowedTCPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
+  networking.firewall.allowedUDPPorts = [
+    53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 8472 10250
+  ];
+  networking.firewall.allowedUDPPortRanges = [
+    { from = 30000; to = 32767; }
+  ];
+  networking.firewall.allowedTCPPorts = [
+    53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 10250
+  ];
+  networking.firewall.allowedTCPPortRanges = [
+    { from = 30000; to = 32767; }
+  ];
 }
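Note: the nameservers list resolves to "self first, then the two peers on sec0", and the foldl' renders it into /etc/resolv.conf. A hedged sketch of the result on sobble (addresses are placeholders; the real values come from numbers.api.hostIface):

    cat /etc/resolv.conf
    # nameserver 127.0.0.1
    # nameserver 10.127.1.2   # snorlax via sec0 (hypothetical address)
    # nameserver 10.127.1.4   # rowlet via sec0 (hypothetical address)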

File: (name not shown; a systemPackages module)

@@ -7,20 +7,20 @@
 {
   environment.systemPackages = with pkgs; [
     seatd
-    emacs-nox
+    #emacs-nox
     inetutils
     unzip
     buildah
     curl
     vim
-    neovim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
+    neovim
     wget
     sshfs
     dig
     gost
     elinks
-    dislocker
-    ntfs3g
+    #dislocker
+    #ntfs3g
     kubectl
     sops
     git
@@ -32,6 +32,7 @@
     brightnessctl
     kubernetes-helm
     ripgrep
+    bridge-utils
     nettools
     psmisc
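Note: bridge-utils pairs with the bridge-centric changes elsewhere in this commit (allowedBridges, the br= qemu NICs); it provides brctl for a quick view of the bridges on a host:

    brctl show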

File: (name not shown; a virtualisation/containers module)

@@ -9,18 +9,18 @@
   virtualisation = {
     kvmgt.enable = true;
-    libvirtd = {
-      enable = true;
-      qemu = {
-        runAsRoot = true;
-        verbatimConfig = ''
-          cgroup_device_acl = ["/dev/kvmfr0", "/dev/kvm"]
-        '';
-        swtpm = {
-          enable = true;
-        };
-      };
-    };
+    #libvirtd = {
+    #  enable = true;
+    #  qemu = {
+    #    runAsRoot = true;
+    #    verbatimConfig = ''
+    #      cgroup_device_acl = ["/dev/kvmfr0", "/dev/kvm"]
+    #    '';
+    #    swtpm = {
+    #      enable = true;
+    #    };
+    #  };
+    #};
     containers = {
       enable = true;
       policy = {
@@ -34,7 +34,7 @@
     };
   };
-  hardware.nvidia-container-toolkit.enable = true;
+  #hardware.nvidia-container-toolkit.enable = true;
   services.openssh.enable = true;
   networking.firewall.enable = true;

File: (name not shown; likely modules/tank-nvme-luks.nix, judging by the pattern of the three tank modules below)

@@ -0,0 +1,33 @@
+{config,...}:
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n  # new partition, assuming this is a blank disk.
+#    # (enter for all defaults until you're back at the prompt)
+# t  # set type
+# 1  # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w  # write and quit
+#
+# either make the lv inside an existing vg like the root luks one
+# ----------
+# lvcreate -L50G -n sobble-tank-nvme sobble-luks
+# --- or ---
+# pvcreate /dev/nvme0n2p15
+# vgcreate sobble-tank-nvme /dev/nvme0n2p15
+# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
+# -- then --
+# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
+let
+  m = "${config.networking.hostName}-luks";
+  n = "${config.networking.hostName}-tank-nvme";
+in
+{
+  fileSystems."/tank/nvme" = {
+    device = "/dev/${m}/${n}";
+    fsType = "ext4";
+  };
+}

File: modules/tank-nvme.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
+{config,...}:
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n  # new partition, assuming this is a blank disk.
+#    # (enter for all defaults until you're back at the prompt)
+# t  # set type
+# 1  # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w  # write and quit
+#
+# pvcreate /dev/nvme5n7p9
+# vgcreate sobble-tank-nvme /dev/nvme5n7p9
+# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
+# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
+let n = "${config.networking.hostName}-tank-nvme";
+in
+{
+  fileSystems."/tank/nvme" = {
+    device = "/dev/${n}/${n}";
+    fsType = "ext4";
+  };
+}

File: modules/tank-ssd-luks.nix (new file, 28 lines)

@@ -0,0 +1,28 @@
+{config,...}:
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n  # new partition, assuming this is a blank disk.
+#    # (enter for all defaults until you're back at the prompt)
+# t  # set type
+# 1  # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w  # write and quit
+#
+# pvcreate /dev/sda1
+# vgcreate sobble-tank-ssd /dev/sda1
+# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
+# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
+# (nb: this -luks variant reads the LV from the <hostname>-luks VG, so the
+#  lvcreate-inside-the-luks-vg approach from tank-nvme-luks.nix applies here too)
+let
+  m = "${config.networking.hostName}-luks";
+  n = "${config.networking.hostName}-tank-ssd";
+in
+{
+  fileSystems."/tank/ssd" = {
+    device = "/dev/${m}/${n}";
+    fsType = "ext4";
+  };
+}

File: modules/tank-ssd.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
+{config,...}:
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n  # new partition, assuming this is a blank disk.
+#    # (enter for all defaults until you're back at the prompt)
+# t  # set type
+# 1  # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w  # write and quit
+#
+# pvcreate /dev/sda1
+# vgcreate sobble-tank-ssd /dev/sda1
+# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
+# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
+let n = "${config.networking.hostName}-tank-ssd";
+in
+{
+  fileSystems."/tank/ssd" = {
+    device = "/dev/${n}/${n}";
+    fsType = "ext4";
+  };
+}
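Note: all four tank modules share one shape: an LV named after the host, mounted under /tank. A hedged sketch of wiring one into a host and verifying the mount after a rebuild (host and module choice are illustrative):

    # in the host's configuration: imports = [ ./modules/tank-ssd.nix ];
    nixos-rebuild --flake '.#sobble' --target-host sobble switch --use-remote-sudo
    ssh sobble findmnt /tank/ssd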

File: (name not shown; the QEMU VM networking helpers)

@@ -1,19 +1,16 @@
-{ lib, modulesPath, numbers, ... }:
+{ config, lib, modulesPath, numbers, ... }:
 with lib;
 let
-  makeNic = host: iface:
-    let { matchMac, iface, media, ... } = numbers.api.hostIface host iface;
-    in
-    if media != "eth" then [] else
-    [
-      "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}"
-    ];
-  makeNics = host: concatMap (makeNic host) (numbers.api.hostIfaces host);
+  makeNic = { matchMac, iface, media, ... }:
+    # because of the bridge logic, br=iface _and_ internal-iface=iface
+    if media != "eth" then [] else [ "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}" ];
+  makeNicFromHostIface = host: iface: makeNic (numbers.api.hostIface host iface);
+  makeNics = host: concatMap (makeNicFromHostIface host) (numbers.api.hostIfaces host);
   makeQemuNetworkingOptions = host:
     (makeNics host) ++ [
-      "-net nic,netdev=user.0,model=virtio"
-      "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+      # "-net nic,netdev=user.0,model=virtio"
+      # "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
     ];
 in
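Note: makeNic now maps one iface record from numbers.api to at most one qemu flag, and non-eth media yield [] so wifi/virtual interfaces are skipped. A hypothetical record { matchMac = "52:54:00:12:34:56"; iface = "sec0"; media = "eth"; } would put this on the VM's command line:

    qemu-system-x86_64 \
      -nic bridge,id=sec0,br=sec0,model=virtio,mac=52:54:00:12:34:56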