wip
This commit is contained in: parent 97a0c9035f, commit ec44cba36d
@@ -33,7 +33,7 @@ k3s_reset() {
 #spread_token
 #deploy snorlax
-spread_token
+#spread_token
 deploy snorlax "$@"
 deploy sobble "$@"
 deploy rowlet "$@"
flake.lock (20 lines changed)

@@ -175,11 +175,11 @@
     },
     "nixpkgs_3": {
       "locked": {
-        "lastModified": 1721821769,
-        "narHash": "sha256-PhmkdTJs2SfqKzSyDB74rDKp1MH4mGk0pG/+WqrnGEw=",
+        "lastModified": 1722087241,
+        "narHash": "sha256-2ShmEaFi0kJVOEEu5gmlykN5dwjWYWYUJmlRTvZQRpU=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "d0907b75146a0ccc1ec0d6c3db287ec287588ef6",
+        "rev": "8c50662509100d53229d4be607f1a3a31157fa12",
         "type": "github"
       },
       "original": {

@@ -225,11 +225,11 @@
         "nixpkgs": "nixpkgs_4"
       },
       "locked": {
-        "lastModified": 1721931394,
-        "narHash": "sha256-LetDlT8SYpcDZURvkHW7OsVzE0QvmVWv+HIbwYsA0Ac=",
+        "lastModified": 1722138515,
+        "narHash": "sha256-8iQj7YvgFSStr3HH4PYm0ofrflS+74BxesKMUdtFhnw=",
         "ref": "refs/heads/main",
-        "rev": "16f8054106f73b8cf21ded014ffa42fb4fe47947",
-        "revCount": 24,
+        "rev": "b717678d0f964ede087b5bef49bc4ec7ffa1d8d8",
+        "revCount": 28,
         "type": "git",
         "url": "https://git.strudelline.net/cascade/numbers"
       },

@@ -315,11 +315,11 @@
     },
     "unstable": {
       "locked": {
-        "lastModified": 1721782431,
-        "narHash": "sha256-UNDpwjYxNXQet/g3mgRLsQ9zxrbm9j2JEvP4ijF3AWs=",
+        "lastModified": 1722073938,
+        "narHash": "sha256-OpX0StkL8vpXyWOGUD6G+MA26wAXK6SpT94kLJXo6B4=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "4f02464258baaf54992debfd010a7a3662a25536",
+        "rev": "e36e9f57337d0ff0cf77aceb58af4c805472bfae",
         "type": "github"
       },
       "original": {
k3s_reset.sh (new file, 23 lines)

@@ -0,0 +1,23 @@
+#!/bin/bash
+
+for f in snorlax sobble rowlet;do
+  ssh $f sudo systemctl stop k3s || true
+  ssh $f sudo k3s-killall.sh || true
+  ssh $f sudo rm -rf /var/lib/rancher/k3s /etc/rancher/k3s
+done
+
+deploy() {
+  TARGET="$1"
+  nixos-rebuild --flake ".#$TARGET" --target-host "$TARGET" switch --impure --use-remote-sudo
+}
+
+deploy snorlax
+TOKEN="$(ssh snorlax sudo cat /var/lib/rancher/k3s/server/node-token)"
+echo "$TOKEN" | ssh sobble "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
+echo "$TOKEN" | ssh rowlet "sudo bash -c 'umask 077; cat > /etc/k3s.token'"
+deploy sobble
+deploy rowlet
+
+import-k3s-creds.sh sobble k0 172.16.1.2
+
+flux bootstrap gitea --hostname=git.strudelline.net --owner=cascade --repository=k0 --token-auth
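For context on the deploy helper: nixos-rebuild --flake ".#$TARGET" resolves $TARGET against nixosConfigurations in the repo's flake, so the script assumes one output per host. A minimal sketch of that assumed layout (host names come from the script; the nixpkgs URL and module path are hypothetical, since flake.nix itself is not part of this diff):

  {
    inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";  # assumption, not from this diff
    outputs = { self, nixpkgs, ... }: {
      nixosConfigurations.snorlax = nixpkgs.lib.nixosSystem {
        system = "x86_64-linux";            # assumption: x86_64 hosts
        modules = [ ./hosts/snorlax.nix ];  # hypothetical path
      };
      # sobble and rowlet would be declared the same way
    };
  }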
@@ -27,5 +27,7 @@
 '';
 };
 };
+
+virtualisation.libvirtd.allowedBridges = [ "sec0" "lan0" "wan0" ];
 };
 }
modules/corenet-flux.yaml (new file, 5 lines)

@@ -0,0 +1,5 @@
+---
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: test-wow
@@ -8,6 +8,13 @@ strIfHasIface = iface: s: if hasIface iface then s else "";
 attrsetIfHasIface = iface: as: if hasIface iface then as else {};
 eltIfHasIface = iface: elt: if hasIface iface then [ elt ] else [];
+
+nameservers = filter (x: x != "") [
+  "127.0.0.1"
+  (if config.networking.hostName != "snorlax" then (numbers.api.hostIface "snorlax" "sec0").ip else "")
+  (if config.networking.hostName != "sobble" then (numbers.api.hostIface "sobble" "sec0").ip else "")
+  (if config.networking.hostName != "rowlet" then (numbers.api.hostIface "rowlet" "sec0").ip else "")
+];
 in

 {
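The nameservers list keeps localhost plus the sec0 address of every other node, filtering out the empty string produced for the local host. A minimal sketch of how it evaluates on sobble, with made-up sec0 addresses standing in for whatever numbers.api.hostIface actually returns:

  # on sobble, assuming snorlax sec0 = 172.16.1.1 and rowlet sec0 = 172.16.1.3 (hypothetical values)
  nameservers = filter (x: x != "") [ "127.0.0.1" "172.16.1.1" "" "172.16.1.3" ];
  # => [ "127.0.0.1" "172.16.1.1" "172.16.1.3" ]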
@@ -21,106 +28,60 @@ in
   enable = true;
   config = ''
     . {
-      ${strIfHasIface "sec0" "bind sec0"}
-      ${strIfHasIface "lan0" "bind lan0"}
+      ${strIfHasIface "sxxxxec0" "bind sec0"}
+      ${strIfHasIface "xxxxlan0" "bind lan0"}
       forward . 172.16.1.8
     }
   '';
   };
+  services.resolved.enable = false;
+  #networking.resolvconf.enable = false;
+
-  #services.postgresql = {
-  # enable = true;
-  # dataDir = "/srv/pgdata";
-  # settings = {
-  # default_transaction_isolation = "repeatable read";
-  # };
-  # authentication = ''
-  # host all all 10.127.1.2/29 trust
-  # '';
-  # enableTCPIP = true;
-  #};
-
-  #systemd.tmpfiles.rules = [
-  # "d /srv/pgdata 775 postgres postgres -"
-  #];
-
+  environment.etc."resolv.conf".text = foldl'
+    (a: s: if s == "" then a else "${a}nameserver ${s}\n")
+    "" nameservers;
+  networking.nameservers = nameservers;
-  #services.pgpool = {
-  # enable = true;
-  # config = ''
-  # backend_clustering_mode = 'snapshot_isolation'
-  # backend_hostname0 = '10.127.1.2'
-  # backend_port0 = 5432
-  # backend_weight0 = 1
-  # backend_data_directory0 = '/srv/pgdata'
-  # backend_flag0 = ALLOW_TO_FAILOVER
-  # backend_hostname1 = '10.127.1.3'
-  # backend_port1 = 5432
-  # backend_weight1 = 1
-  # backend_data_directory1 = '/srv/pgdata'
-  # backend_flag1 = ALLOW_TO_FAILOVER
-  # listen_address = '*'
-  # logging_collector = true
-  # log_destination = 'syslog,stderr'
-  # log_min_messages = 'INFO'
-  # '';
-  #};
-
+  system.activationScripts."corenet-flux" = mkIf true ''
+    ln -sf ${./corenet-flux.yaml} /var/lib/rancher/k3s/server/manifests/corenet-flux.yaml
+  '';
   services.k3s = {
     enable = true;
-    tokenFile = "/etc/k3s.token";
-    #serverAddr =
-    # mkIf (config.networking.hostName != "snorlax")
-    # "https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
-    #clusterInit = config.networking.hostName == "snorlax";
+    tokenFile = mkIf (config.networking.hostName != "snorlax") "/etc/k3s.token";
+    serverAddr =
+      mkIf (config.networking.hostName != "snorlax")
+      "https://${(numbers.api.hostIface "snorlax" "sec0").ip}:6443";
+    clusterInit = config.networking.hostName == "snorlax";
     extraFlags = (
-      " --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2,nats://10.127.1.2:4222,nats://10.127.1.3:4222,nats://10.127.1.4:4222"+
+      #" --datastore-endpoint=nats://localhost:4222?noEmbed=true&bucket=k0-kine&replicas=2"+
       " --disable=traefik"+
       " --disable=local-storage"+
      " --cluster-cidr=10.128.0.0/16"+
-      " --flannel-backend=host-gw"+
+      " --service-cidr=10.129.0.0/16"+
+      " --flannel-backend=vxlan"+
+      " --embedded-registry"+
       (strIfHasIface "sec0" " --node-ip=${(numbers.api.hostIface config.networking.hostName "sec0").ip}")+
-      (strIfHasIface "lan0" " --node-external-ip=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
+      #(strIfHasIface "lan0" " --tls-san=${(numbers.api.hostIface config.networking.hostName "lan0").ip}")+
       "");
-    #"--node-ip=${config.systemd.network
-  };
-
-  systemd.services.nats-datadir = {
-    requiredBy = [ "nats.service" ];
-    before = [ "nats.service" ];
-    serviceConfig = {
-      Type = "oneshot";
-      ExecStart = pkgs.writeScript "nats-datadir" ''
-        #!${pkgs.bash}/bin/bash
-        ${pkgs.coreutils}/bin/mkdir -p /srv/nats
-        ${pkgs.coreutils}/bin/chown -R nats:nats /srv/nats
-        ${pkgs.coreutils}/bin/chmod 750 /srv/nats
-      '';
-    };
   };

-  systemd.services.nats.unitConfig.Requires = [ "systemd-tmpfiles-resetup.service" ];
-  systemd.services.nats.unitConfig.After = [ "systemd-tmpfiles-resetup.service" ];
+  environment.etc."rancher/k3s/registries.yaml".text = ''
+    mirrors:
+      "*":
+  '';

-  services.nats = {
-    enable = true;
-    serverName = config.networking.hostName;
-    dataDir = "/srv/nats";
-    jetstream = true;
-    settings = {
-      cluster = {
-        name = "cascade";
-        no_advertise = true;
-        port = 6222;
-        routes = [
-          "nats://10.127.1.2:6222"
-          "nats://10.127.1.3:6222"
-          "nats://10.127.1.4:6222"
-        ];
-      };
-      http_port = 8222;
-    };
-  };
-
-  networking.firewall.allowedUDPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
-  networking.firewall.allowedTCPPorts = [ 53 5432 9898 9999 6443 4222 6222 8222 ];
+  networking.firewall.allowedUDPPorts = [
+    53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 8472 10250
+  ];
+  networking.firewall.allowedUDPPortRanges = [
+    { from = 30000; to = 32767; }
+  ];
+  networking.firewall.allowedTCPPorts = [
+    53 80 443 5432 5001 9898 9999 6443 4222 6222 8222 2379 2380 10250
+  ];
+  networking.firewall.allowedTCPPortRanges = [
+    { from = 30000; to = 32767; }
+  ];
 }
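The resolv.conf text added above is just that list rendered as one "nameserver" line per entry; a sketch of the foldl' with the same hypothetical addresses as before:

  # foldl' (a: s: if s == "" then a else "${a}nameserver ${s}\n") "" [ "127.0.0.1" "172.16.1.1" "172.16.1.3" ]
  # => "nameserver 127.0.0.1\nnameserver 172.16.1.1\nnameserver 172.16.1.3\n"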
@@ -7,20 +7,20 @@
 {
   environment.systemPackages = with pkgs; [
     seatd
-    emacs-nox
+    #emacs-nox
     inetutils
     unzip
     buildah
     curl
     vim
-    neovim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
+    neovim
     wget
     sshfs
     dig
     gost
     elinks
-    dislocker
-    ntfs3g
+    #dislocker
+    #ntfs3g
     kubectl
     sops
     git

@@ -32,6 +32,7 @@
     brightnessctl
     kubernetes-helm
     ripgrep
+    bridge-utils
     nettools
     psmisc
@@ -9,18 +9,18 @@
   virtualisation = {
     kvmgt.enable = true;
-    libvirtd = {
-      enable = true;
-      qemu = {
-        runAsRoot = true;
-        verbatimConfig = ''
-          cgroup_device_acl = ["/dev/kvmfr0", "/dev/kvm"]
-        '';
-        swtpm = {
-          enable = true;
-        };
-      };
-    };
+    #libvirtd = {
+    # enable = true;
+    # qemu = {
+    # runAsRoot = true;
+    # verbatimConfig = ''
+    # cgroup_device_acl = ["/dev/kvmfr0", "/dev/kvm"]
+    # '';
+    # swtpm = {
+    # enable = true;
+    # };
+    # };
+    #};
     containers = {
       enable = true;
       policy = {

@@ -34,7 +34,7 @@
     };
   };

-  hardware.nvidia-container-toolkit.enable = true;
+  #hardware.nvidia-container-toolkit.enable = true;

   services.openssh.enable = true;
   networking.firewall.enable = true;
modules/tank-nvme-luks.nix (new file, 33 lines)

@@ -0,0 +1,33 @@
+{config,...}:
+
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n # new partition, assuming this is a blank disk.
+#   # (enter for all defauls until you're back at the prompt)
+# t # set type
+# 1 # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w # write and quit
+#
+# either make the lv inside an existing vg like the root luks one
+# ----------
+# lvcreate -L50G -n sobble-tank-nvme sobble-luks
+# --- or ---
+# pvcreate /dev/nvme0n2p15
+# vgcreate sobble-tank-nvme /dev/nvme0n2p15
+# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
+# -- then --
+# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
+
+let
+  m = "${config.networking.hostName}-luks";
+  n = "${config.networking.hostName}-tank-nvme";
+in
+{
+  fileSystems."/tank/nvme" = {
+    device = "/dev/${m}/${n}";
+    fsType = "ext4";
+  };
+}
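These tank-* modules only declare the mount; they assume the volume group and LV already exist, as the comments above explain. A minimal sketch of pulling one into a host configuration (the module path is from this commit, the surrounding host file and its location are hypothetical):

  # hypothetical host module, e.g. hosts/sobble.nix
  { ... }:
  {
    imports = [ ../modules/tank-nvme-luks.nix ];
    # with networking.hostName = "sobble" this mounts
    # /dev/sobble-luks/sobble-tank-nvme at /tank/nvme as ext4
  }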
modules/tank-nvme.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
+{config,...}:
+
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n # new partition, assuming this is a blank disk.
+#   # (enter for all defauls until you're back at the prompt)
+# t # set type
+# 1 # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w # write and quit
+#
+# pvcreate /dev/nvme5n7p9
+# vgcreate sobble-tank-nvme /dev/nvme5n7p9
+# lvcreate -l 100%FREE -n sobble-tank-nvme sobble-tank-nvme
+# mkfs.ext4 /dev/sobble-tank-nvme/sobble-tank-nvme
+
+let n = "${config.networking.hostName}-tank-nvme";
+in
+{
+  fileSystems."/tank/nvme" = {
+    device = "/dev/${n}/${n}";
+    fsType = "ext4";
+  };
+}
modules/tank-ssd-luks.nix (new file, 28 lines)

@@ -0,0 +1,28 @@
+{config,...}:
+
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n # new partition, assuming this is a blank disk.
+#   # (enter for all defauls until you're back at the prompt)
+# t # set type
+# 1 # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w # write and quit
+#
+# pvcreate /dev/sda1
+# vgcreate sobble-tank-ssd /dev/sda1
+# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
+# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
+
+let
+  m = "${config.networking.hostName}-luks";
+  n = "${config.networking.hostName}-tank-ssd";
+in
+{
+  fileSystems."/tank/ssd" = {
+    device = "/dev/${m}/${n}";
+    fsType = "ext4";
+  };
+}
modules/tank-ssd.nix (new file, 26 lines)

@@ -0,0 +1,26 @@
+{config,...}:
+
+# to use this, you must have created the lvm devices for the host
+# in this example, my hostname is sobble and the disk is /dev/sda:
+#
+# fdisk /dev/sda
+# n # new partition, assuming this is a blank disk.
+#   # (enter for all defauls until you're back at the prompt)
+# t # set type
+# 1 # first partition, again assuming this was a blank disk
+# 8e # lvm
+# w # write and quit
+#
+# pvcreate /dev/sda1
+# vgcreate sobble-tank-ssd /dev/sda1
+# lvcreate -l 100%FREE -n sobble-tank-ssd sobble-tank-ssd
+# mkfs.ext4 /dev/sobble-tank-ssd/sobble-tank-ssd
+
+let n = "${config.networking.hostName}-tank-ssd";
+in
+{
+  fileSystems."/tank/ssd" = {
+    device = "/dev/${n}/${n}";
+    fsType = "ext4";
+  };
+}
@@ -1,19 +1,16 @@
-{ lib, modulesPath, numbers, ... }:
+{ config, lib, modulesPath, numbers, ... }:
 with lib;

 let
-  makeNic = host: iface:
-    let { matchMac, iface, media, ... } = numbers.api.hostIface host iface;
-    in
-    if media != "eth" then [] else
-    [
-      "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}"
-    ];
-  makeNics = host: concatMap (makeNic host) (numbers.api.hostIfaces host);
+  makeNic = { matchMac, iface, media, ... }:
+    # because of the bridge logic, br=iface _and_ internal-iface=iface
+    if media != "eth" then [] else [ "-nic bridge,id=${iface},br=${iface},model=virtio,mac=${matchMac}" ];
+  makeNicFromHostIface = host: iface: makeNic (numbers.api.hostIface host iface);
+  makeNics = host: concatMap (makeNicFromHostIface host) (numbers.api.hostIfaces host);
   makeQemuNetworkingOptions = host:
     (makeNics host) ++ [
-      "-net nic,netdev=user.0,model=virtio"
-      "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+      # "-net nic,netdev=user.0,model=virtio"
+      # "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
     ];
 in
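For reference, a sketch of what the refactored helpers produce for a single ethernet interface, using a made-up MAC and the iface name sec0:

  # makeNic { matchMac = "52:54:00:00:00:01"; iface = "sec0"; media = "eth"; }  # hypothetical values
  # => [ "-nic bridge,id=sec0,br=sec0,model=virtio,mac=52:54:00:00:00:01" ]
  # a non-eth entry contributes [], so makeNics only emits bridge NICs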