true it on up y'all

James Andariese 2023-12-20 16:40:16 -06:00
parent d9b5335739
commit a27d8dded3
179 changed files with 13550 additions and 903 deletions

.gitignore vendored

@ -6,3 +6,4 @@
/local-config.sh
charts/
/old/
+/deleted/


@ -2,7 +2,7 @@ operator-sdk 1.19.1
kubectl 1.25.9
terraform 1.1.9
kubectx 0.9.4
-cmctl 1.8.0
+cmctl 1.13.2
helm 3.8.2
k3sup 0.11.3
krew 0.4.3


@ -0,0 +1,104 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: audiobookshelf
namespace: audiobookshelf
spec:
ingressClassName: haproxy
rules:
- host: audiobooks.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: audiobookshelf
port:
number: 80
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: audiobookshelf-data
namespace: audiobookshelf
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: ssd
volumeMode: Filesystem
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: audiobookshelf
name: audiobookshelf
spec:
replicas: 1
selector:
matchLabels:
app: audiobookshelf
strategy:
type: Recreate
template:
metadata:
labels:
app: audiobookshelf
spec:
terminationGracePeriodSeconds: 0
restartPolicy: Always
volumes:
- name: data
persistentVolumeClaim:
claimName: audiobookshelf-data
- name: podcasts
nfs:
server: 172.16.18.1
path: /volume1/podcasts
- name: audiobooks
nfs:
server: 172.16.18.1
path: /volume1/audiobooks
containers:
- name: audiobookshelf
image: ghcr.io/advplyr/audiobookshelf:2.4.4
env: []
volumeMounts:
- mountPath: /audiobooks
name: audiobooks
- mountPath: /podcasts
name: podcasts
- mountPath: /config
name: data
subPath: config
- mountPath: /metadata
name: data
subPath: metadata
#securityContext:
# capabilities:
# add: ["NET_ADMIN","SYS_TIME"]
---
apiVersion: v1
kind: Service
metadata:
labels:
app: audiobookshelf
name: audiobookshelf
namespace: audiobookshelf
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: audiobookshelf
port: 80
protocol: TCP
targetPort: 80
selector:
app: audiobookshelf
sessionAffinity: None
type: ClusterIP

budibase/.gitignore vendored Normal file

@ -0,0 +1 @@
_helm-output*.json


@ -1,51 +0,0 @@
---
apiVersion: v1
kind: Namespace
metadata:
labels:
wildcard-tls.kn8v.com/copy: "true"
name: budibase
---
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: budibase
namespace: argocd
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
destination:
server: "https://kubernetes.default.svc"
namespace: budibase
syncPolicy:
automated:
prune: true
selfHeal: true
source:
chart: budibase
repoURL: https://budibase.github.io/budibase/
targetRevision: 2.8.10
helm:
values: |-
globals:
appVersion: v2.8.10
ingress:
nginx: false
className: haproxy
annotations:
haproxy-ingress.github.io/ssl-redirect: "true"
hosts:
- host: bb.strudelline.net
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-service
port:
number: 10000
tls:
- hosts:
- bb.strudelline.net
secretName: wildcard-tls

budibase/deploy.sh Normal file

@ -0,0 +1,3 @@
helm repo add budibase https://budibase.github.io/budibase/
helm repo update
helm upgrade -i -n budibase --create-namespace budibase budibase/budibase -f values.yaml --post-renderer ./post-process.sh

budibase/diff.sh Normal file

@ -0,0 +1 @@
helm diff upgrade --install -n budibase budibase budibase/budibase -f values.yaml --post-renderer ./post-process.sh --normalize-manifests "$@"


@ -0,0 +1,13 @@
if (
.kind? == "Deployment"
and .apiVersion? == "apps/v1"
and (.metadata.name? == "app-service" or .metadata.name? == "worker-service")
) then
.spec.template.spec.containers //= []
| .spec.template.spec.containers[0].volumeMounts //= []
| .spec.template.spec.containers[0].volumeMounts += [{"mountPath":"/root","name":"persist"}]
| .spec.template.spec.volumes //= []
| .spec.template.spec.volumes += [{"name":"persist","persistentVolumeClaim":{"claimName":"budibase-persist"}}]
else
.
end

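The `//=` lines above are the guard that makes the patch safe on manifests missing the field: jq's alternative-assignment only assigns when the left-hand side is null or false. A minimal sketch of the idiom, using a hypothetical input:

echo '{"spec":{}}' | jq '.spec.containers //= [] | .spec.containers += [{"name":"demo"}]'
# -> {"spec":{"containers":[{"name":"demo"}]}}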

@ -0,0 +1,22 @@
if (.kind? == "Deployment" and .apiVersion? == "apps/v1" and
(.metadata.name? == "app-service" or .metadata.name? == "worker-service")) then
.spec.template.spec.containers //= []
| .spec.template.spec.containers += [{
"name": "kubectl-proxy",
"image": "bitnami/kubectl:1.28.2",
"args": ["proxy", "--token", "$(TOKEN)"],
"env": [
{
"name": "TOKEN",
"valueFrom": {
"secretKeyRef": {
"name": "budibase-sa",
"key": "token"
}
}
}
]
}]
else
.
end


@ -0,0 +1,10 @@
if (
.kind? == "Deployment"
and .apiVersion? == "apps/v1"
and (.metadata.name? == "app-service" or .metadata.name? == "proxy-service")
) then
.spec.template.spec.containers[0].env //= []
| .spec.template.spec.containers[0].env += [{"name":"HTTP_MB_LIMIT","value":"250"}] # ,{"name":"","value":"10000"}]
else
.
end

budibase/post-process.sh Executable file

@ -0,0 +1,23 @@
#!/bin/bash
# Helm post-renderer: the fully rendered chart arrives on stdin as a
# multi-document YAML stream and must leave on stdout the same way.
INPUT="$(cat | yq -o json . | jq .)"
I=0
# Snapshot each stage to _helm-outputN.json (gitignored above) for debugging.
echo "$INPUT" > _helm-output${I}.json; I=$((I+1))
# Apply every jq patch in helm-jq/, in lexical order.
for f in helm-jq/*.jq; do
INPUT="$(echo "$INPUT" | jq "$(cat "$f")")"
echo "$INPUT" > _helm-output${I}.json; I=$((I+1))
done
# Append the extra manifests from static/, re-split the JSON stream into
# '---'-separated documents, squash runs of consecutive separators, and
# convert back to YAML.
(
echo "$INPUT"
for f in static/*.yaml; do
cat "$f" | yq -o json .
done
) \
| jq -c . \
| while read -r R; do echo ---; echo "$R"; done \
| awk 'll!="---" || $0!="---" {print} {ll=$0}' \
| yq -P .

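Since a Helm post-renderer is just a filter from rendered manifests on stdin to manifests on stdout, the script can be exercised without a cluster; a sketch, assuming it is run from the budibase/ directory after deploy.sh has added the repo:

helm template budibase budibase/budibase -f values.yaml | ./post-process.sh | less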

@ -0,0 +1,13 @@
---
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
metadata:
name: quasi-base64
namespace: budibase
spec:
length: 20
digits: 5
symbols: 1
symbolCharacters: "-_"
noUpper: false
allowRepeat: true

budibase/secrets.yaml Normal file

@ -0,0 +1,39 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: budibase-budibase
namespace: budibase
annotations:
meta.helm.sh/release-name: budibase
meta.helm.sh/release-namespace: budibase
app.kubernetes.io/managed-by: Helm
spec:
refreshInterval: "0"
target:
name: budibase-budibase
dataFrom:
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: quasi-base64
rewrite: ["regexp": {"source": ".*", "target": "jwtSecret"}]
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: quasi-base64
rewrite: ["regexp": {"source": ".*", "target": "internalApiKey"}]
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: quasi-base64
rewrite: ["regexp": {"source": ".*", "target": "objectStoreAccess"}]
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: quasi-base64
rewrite: ["regexp": {"source": ".*", "target": "objectStoreSecret"}]

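Each dataFrom entry above draws a value from the quasi-base64 Password generator and rewrites its single generated key to the target name, so budibase-budibase should end up with exactly the four keys named. A quick check with plain kubectl:

kubectl -n budibase get secret budibase-budibase \
  -o go-template='{{range $k, $v := .data}}{{$k}}{{"\n"}}{{end}}'
# expected: internalApiKey, jwtSecret, objectStoreAccess, objectStoreSecret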

@ -0,0 +1,19 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: budibase
namespace: budibase
spec:
ingressClassName: haproxy
rules:
- host: bb.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: proxy-service
port:
number: 10000

budibase/static/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: budibase-persist
namespace: budibase
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
storageClassName: ssd
volumeMode: Filesystem

budibase/static/sa.yaml Normal file

@ -0,0 +1,42 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: budibase
namespace: budibase
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: budibase
name: budibase
rules:
- apiGroups: ["*"]
resources:
- "*"
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
creationTimestamp: null
name: budibase-rolebinding
namespace: budibase
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: budibase
subjects:
- kind: ServiceAccount
name: budibase
namespace: budibase
---
apiVersion: v1
kind: Secret
metadata:
name: budibase-sa
namespace: budibase
annotations:
kubernetes.io/service-account.name: budibase
type: kubernetes.io/service-account-token

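The budibase-sa token above is what the kubectl-proxy sidecar patch in helm-jq/ hands to `kubectl proxy`, which listens on kubectl's default 127.0.0.1:8001; Budibase apps in the same pod thereby get unauthenticated (but Role-scoped) API access. A hedged smoke test, assuming curl exists in the app-service image:

kubectl -n budibase exec deploy/app-service -- \
  curl -s http://127.0.0.1:8001/api/v1/namespaces/budibase/pods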
budibase/values.yaml Normal file

@ -0,0 +1,5 @@
globals:
createSecrets: false
ingress:
enabled: false
nginx: false

cascade/br0-static.yaml Normal file

@ -0,0 +1,29 @@
---
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: br0-static
namespace: cascade
annotations:
k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/br0
spec:
config: >
{
"cniVersion": "0.3.1",
"name": "br0-static",
"plugins": [{
"type": "bridge",
"bridge": "br0",
"ipam": {
"type": "static",
"routes": [
{ "dst": "0.0.0.0/0", "gw": "172.16.1.1" }
],
"dns": {
"nameservers" : ["172.16.1.8"],
"domain": "cascade.strudelline.net",
"search": [ "cascade.strudelline.net" ]
}
}
}]
}

cascade/bridge.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: br0
namespace: cascade
annotations:
k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/br0
spec:
config: >
{
"cniVersion": "0.3.1",
"name": "br0",
"plugins": [{
"type": "bridge",
"bridge": "br0",
"ipam": {}
}]
}

cascade/dmz0.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: dmz0
namespace: cascade
annotations:
k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/dmz0
spec:
config: >
{
"cniVersion": "0.3.1",
"name": "dmz0",
"plugins": [{
"type": "bridge",
"bridge": "dmz0",
"ipam": {}
}]
}

cascade/ns.yaml Normal file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: cascade
spec: {}
status: {}

cascade/private0.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: k8s.cni.cncf.io/v1
kind: NetworkAttachmentDefinition
metadata:
name: private0
namespace: cascade
annotations:
k8s.v1.cni.cncf.io/resourceName: bridge.network.kubevirt.io/private0
spec:
config: >
{
"cniVersion": "0.3.1",
"name": "private0",
"plugins": [{
"type": "bridge",
"bridge": "private0",
"ipam": {}
}]
}

cascade/router-pvc.yaml Normal file

@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: router-root
namespace: cascade
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 44023414784
volumeName: asdf
storageClassName: ssd
volumeMode: Filesystem

cascade/router.yaml Normal file

@ -0,0 +1,45 @@
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: router
namespace: cascade
spec:
running: true
template:
spec:
terminationGracePeriodSeconds: 0
domain:
cpu:
model: Westmere-IBRS
cores: 3
threads: 3
sockets: 1
resources:
requests:
cpu: 1500m
memory: 4G
devices:
interfaces:
- name: dmz0
bridge: {}
macAddress: a0:ce:c8:c6:d2:5f
model: virtio
- name: br0
bridge: {}
model: virtio
disks:
- name: root
disk:
bus: virtio
networks:
- name: br0
multus:
networkName: br0
- name: dmz0
multus:
networkName: dmz0
volumes:
- persistentVolumeClaim:
claimName: router-root
name: root

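Day-two access to the VM defined above goes through the standard KubeVirt CLI, for example:

virtctl -n cascade console router    # attach to the serial console
virtctl -n cascade restart router    # bounce the guest without editing the VM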
cascade/xerneas.yaml Normal file

@ -0,0 +1,42 @@
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: xerneas
namespace: cascade
spec:
running: true
template:
spec:
terminationGracePeriodSeconds: 30
domain:
resources:
requests:
memory: 1700M
devices:
interfaces:
- name: br0
bridge: {}
macAddress: 00:15:5d:40:de:1c
model: e1000
disks:
- name: pvdisk
disk:
bus: sata
features:
smm:
enabled: true
firmware:
bootloader:
efi: {}
nodeSelector:
kubernetes.io/hostname: chimecho
networks:
- name: br0
multus:
networkName: br0
volumes:
- name: pvdisk
persistentVolumeClaim:
claimName: xerneas-pvc
readOnly: false

cascade/yveltal.yaml Normal file

@ -0,0 +1,42 @@
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: yveltal
namespace: cascade
spec:
running: true
template:
spec:
terminationGracePeriodSeconds: 30
domain:
resources:
requests:
memory: 1500M
devices:
interfaces:
- name: br0
bridge: {}
macAddress: 00:15:5d:40:de:20
model: e1000
disks:
- name: pvdisk
disk:
bus: sata
features:
smm:
enabled: true
firmware:
bootloader:
efi: {}
nodeSelector:
kubernetes.io/hostname: chimecho
networks:
- name: br0
multus:
networkName: br0
volumes:
- name: pvdisk
persistentVolumeClaim:
claimName: yveltal-pvc
readOnly: false

cert-manager/deploy.sh Normal file

@ -0,0 +1,2 @@
helm repo add jetstack https://charts.jetstack.io
helm upgrade -i --create-namespace -n cert-manager cert-manager jetstack/cert-manager -f values.yaml

cert-manager/diff.sh Normal file

@ -0,0 +1 @@
helm diff upgrade -n cert-manager cert-manager jetstack/cert-manager -f values.yaml

cert-manager/values.yaml Normal file

@ -0,0 +1,6 @@
extraArgs:
- --dns01-recursive-nameservers-only
- --dns01-recursive-nameservers=1.1.1.1:53
ingressShim:
  defaultIssuerKind: ClusterIssuer
  defaultIssuerName: zerossl
installCRDs: "true"


@ -0,0 +1,11 @@
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
name: coredns
namespace: metallb-system
spec:
addresses:
- 172.16.1.9/32
- 172.16.2.9/32
autoAssign: false
avoidBuggyIPs: false

coredns/deploy-dev.sh Normal file

@ -0,0 +1 @@
helm upgrade -i -n coredns-dev --create-namespace coredns-dev coredns/coredns -f values-dev.yaml

coredns/deploy.sh Normal file

@ -0,0 +1,2 @@
helm repo add coredns https://coredns.github.io/helm
helm upgrade -i -n coredns --create-namespace coredns coredns/coredns -f values.yaml

coredns/test.sh Normal file

@ -0,0 +1,44 @@
black() { echo -ne '\033[0;30m'; }
red() { echo -ne '\033[0;31m'; }
green() { echo -ne '\033[0;32m'; }
yellow() { echo -ne '\033[0;33m'; }
blue() { echo -ne '\033[0;34m'; }
purple() { echo -ne '\033[0;35m'; }
cyan() { echo -ne '\033[0;36m'; }
white() { echo -ne '\033[0;37m'; }
bold() { echo -ne '\033[1m'; }
uncolor() { echo -ne '\033[0m'; }
EIGHTYDOTS="................................................................................"
EIGHTYEQUALS="$(echo -n "$EIGHTYDOTS" | tr . =)"
EIGHTYDASHES="$(echo -n "$EIGHTYDOTS" | tr . -)"
function _time {
red
echo "$EIGHTYEQUALS"
yellow
echo -n "$EIGHTYDASHES"
echo -e "\r--- $* "
uncolor
export TIMEFORMAT="%4R real %4U user %4S system"
time "$@"
}
blue;bold
echo
echo "starting tests of the DNS subsystems"
date
echo
_time dig +short @172.16.1.8 xerneas.cascade.strudelline.net
_time dig +short @172.16.1.9 xerneas.cascade.strudelline.net
_time dig +short @172.16.1.8 google.com.cascade.strudelline.net
_time dig +short @172.16.1.9 google.com.cascade.strudelline.net
_time dig +short @172.16.1.8 google.com
_time dig +short @172.16.1.9 google.com
_time dig +short @172.16.1.9 $RANDOM$RANDOM.strudelline.net
_time dig +short @172.16.1.1 $RANDOM$RANDOM.strudelline.net
red;echo "$EIGHTYEQUALS"
uncolor

coredns/values-dev.yaml Normal file

@ -0,0 +1,107 @@
replicaCount: 3
servers:
- zones:
- zone: .
port: 53
# If serviceType is nodePort you can specify nodePort here
# nodePort: 30053
# hostPort: 53
plugins:
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
configBlock: |-
lameduck 5s
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: cluster.local in-addr.arpa ip6.arpa
configBlock: |-
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
- name: transfer
configBlock: |-
to *
- name: k8s_external
parameters: k
configBlock: |-
fallthrough
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
#- name: k8s_gateway
# parameters: cluster.gateway
# configBlock: |-
# resources Ingress
# ttl 10
# individual hosts (full domains but still just hosts)
- {"parameters": "IN A harbor.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.17.115\"", "name": "template"}
- {"parameters": "IN A frigate.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.17.33\"", "name": "template"}
#- {"parameters": "IN A email.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN CNAME mailgun.org.\"", "name": "template"}
#- {"parameters": "IN A pbx.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.56.1\"", "name": "template"}
# werts.us
- name: template
parameters: IN A werts.us
configBlock: answer "{{ .Name }} 60 IN A 172.16.17.80"
# minio.strudelline.net
- name: template
parameters: IN A minio.strudelline.net
configBlock: answer "{{ .Name }} 60 IN A 172.16.17.80"
# cascade.strudelline.net
- name: template
parameters: IN A cascade.strudelline.net
configBlock: |
match ^cascade[.]strudelline[.]net[.]$
answer "{{ .Name }} 60 IN A 172.16.34.1"
answer "{{ .Name }} 60 IN A 172.16.33.1"
fallthrough
# *.strudelline.net
- name: template
parameters: IN A strudelline.net
configBlock: |
match ^(?P<name>[^.]*)[.]strudelline[.]net[.]$
answer "{{ .Name }} 60 IN A 172.16.17.80"
fallthrough
# BYPASS FAMILY FILTER FOR SOME SITES
- name: forward
parameters: myrunningman.com 172.16.1.53:153
# *.cascade.strudelline.net
- name: forward
parameters: in-addr.arpa 172.16.33.1 172.16.34.1
- name: forward
parameters: cascade.strudelline.net 172.16.33.1 172.16.34.1
- name: forward
parameters: . 172.16.1.53:53 172.16.1.53:54
configBlock: |
force_tcp
- name: loop
- name: reload
- name: nsid
parameters: "coredns-ext"
- name: cache
parameters: 30
- name: cancel
- name: whoami
- name: loadbalance
- name: log
- name: minimal
serviceType: LoadBalancer
service:
annotations:
metallb.universe.tf/allow-shared-ip: 172.16.2.9
metallb.universe.tf/loadBalancerIPs: 172.16.2.9
isClusterService: false
#podAnnotations:
# k8s.v1.cni.cncf.io/networks: |
# [{
# "namespace": "cascade",
# "name": "br0-static",
# "ips": ["172.16.2.9/12"]
# }]

coredns/values.yaml Normal file

@ -0,0 +1,107 @@
replicaCount: 3
servers:
- zones:
- zone: .
port: 53
# If serviceType is nodePort you can specify nodePort here
# nodePort: 30053
# hostPort: 53
plugins:
- name: errors
# Serves a /health endpoint on :8080, required for livenessProbe
- name: health
configBlock: |-
lameduck 5s
# Serves a /ready endpoint on :8181, required for readinessProbe
- name: ready
# Required to query kubernetes API for data
- name: kubernetes
parameters: cluster.local in-addr.arpa ip6.arpa
configBlock: |-
pods insecure
fallthrough in-addr.arpa ip6.arpa
ttl 30
- name: transfer
configBlock: |-
to *
- name: k8s_external
parameters: k
configBlock: |-
fallthrough
# Serves a /metrics endpoint on :9153, required for serviceMonitor
- name: prometheus
parameters: 0.0.0.0:9153
#- name: k8s_gateway
# parameters: cluster.gateway
# configBlock: |-
# resources Ingress
# ttl 10
# individual hosts (full domains but still just hosts)
- {"parameters": "IN A harbor.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.17.115\"", "name": "template"}
- {"parameters": "IN A frigate.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.17.33\"", "name": "template"}
#- {"parameters": "IN A email.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN CNAME mailgun.org.\"", "name": "template"}
#- {"parameters": "IN A pbx.strudelline.net", "configBlock": "answer \"{{ .Name }} 60 IN A 172.16.56.1\"", "name": "template"}
# werts.us
- name: template
parameters: IN A werts.us
configBlock: answer "{{ .Name }} 60 IN A 172.16.17.80"
# minio.strudelline.net
- name: template
parameters: IN A minio.strudelline.net
configBlock: answer "{{ .Name }} 60 IN A 172.16.17.80"
# cascade.strudelline.net
- name: template
parameters: IN A cascade.strudelline.net
configBlock: |
match ^cascade[.]strudelline[.]net[.]$
answer "{{ .Name }} 60 IN A 172.16.34.1"
answer "{{ .Name }} 60 IN A 172.16.33.1"
fallthrough
# *.strudelline.net
- name: template
parameters: IN A strudelline.net
configBlock: |
match ^(?P<name>[^.]*)[.]strudelline[.]net[.]$
answer "{{ .Name }} 60 IN A 172.16.17.80"
fallthrough
# BYPASS FAMILY FILTER FOR SOME SITES
- name: forward
parameters: myrunningman.com 172.16.1.53:153
# *.cascade.strudelline.net
- name: forward
parameters: in-addr.arpa 172.16.33.1 172.16.34.1
- name: forward
parameters: cascade.strudelline.net 172.16.33.1 172.16.34.1
- name: forward
parameters: . 172.16.1.53
configBlock: |
force_tcp
- name: loop
- name: reload
- name: nsid
parameters: "coredns-ext"
- name: cache
parameters: 30
- name: cancel
- name: whoami
- name: loadbalance
- name: log
- name: minimal
serviceType: LoadBalancer
service:
annotations:
metallb.universe.tf/allow-shared-ip: 172.16.1.9
metallb.universe.tf/loadBalancerIPs: 172.16.1.9
isClusterService: false
#podAnnotations:
# k8s.v1.cni.cncf.io/networks: |
# [{
# "namespace": "cascade",
# "name": "br0-static",
# "ips": ["172.16.1.9/12"]
# }]

dex/debugger.yaml Normal file

@ -0,0 +1,97 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: debugger
namespace: dex
spec:
ingressClassName: haproxy
rules:
- host: dexdebug.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: debugger
port:
number: 9009
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: dex
name: debugger
spec:
replicas: 1
selector:
matchLabels:
app: debugger
template:
metadata:
labels:
app: debugger
spec:
containers:
- image: ghcr.io/beryju/oidc-test-client:1.4
name: debugger
env:
- name: OIDC_DO_REFRESH
value: "false"
- name: OIDC_DO_INTROSPECTION
value: "false"
- name: OIDC_CLIENT_ID
value: dexdebug
- name: OIDC_CLIENT_SECRET
value: dexdebugSecret
- name: OIDC_PROVIDER
value: https://dex.strudelline.net
- name: OIDC_ROOT_URL
value: https://dexdebug.strudelline.net
- name: OIDC_SCOPES
value: openid,email,groups
ports:
- containerPort: 9009
name: http
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: debugger
namespace: dex
spec:
ports:
- port: 9009
protocol: TCP
targetPort: 9009
selector:
app: debugger
type: ClusterIP
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: debugger-oidc-secret
namespace: dex
spec:
data:
- remoteRef:
key: oidc client - debugger
property: username
secretKey: id
- remoteRef:
key: oidc client - debugger
property: password
secretKey: secret
- remoteRef:
key: oidc client - debugger
property: discovery_url
secretKey: discovery_url
refreshInterval: 60s
secretStoreRef:
kind: ClusterSecretStore
name: bitwarden
target:
name: debugger-oidc-secret

dex/deploy.sh Normal file

@ -0,0 +1,2 @@
helm repo add dex https://charts.dexidp.io && helm repo update
helm upgrade -i -n dex --create-namespace dex dex/dex --reuse-values -f values.yaml "$@"

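Both scripts pass "$@" through to helm, which is how the LDAP bind password (deliberately kept out of values.yaml; see the bindPW comment in it below) is supplied at deploy time:

./deploy.sh --set 'config.connectors[0].config.bindPW=yourpw'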
dex/diff.sh Normal file

@ -0,0 +1,2 @@
helm repo add dex https://charts.dexidp.io && helm repo update
helm diff upgrade -n dex dex dex/dex --reuse-values -f values.yaml "$@"

dex/values.yaml Normal file

@ -0,0 +1,67 @@
config:
connectors:
- config:
bindDN: CN=ldapsearch,OU=ldapsearch,DC=cascade,DC=strudelline,DC=net
#bindPW: run deploy.sh with --set config.connectors[0].config.bindPW="yourpw" to set this value
groupSearch:
baseDN: cn=Users,dc=cascade,dc=strudelline,dc=net
filter: (objectClass=group)
nameAttr: cn
userMatchers:
- groupAttr: member
userAttr: distinguishedName
host: cascade.strudelline.net:636
insecureNoSSL: false
insecureSkipVerify: true
userSearch:
baseDN: cn=Users,dc=cascade,dc=strudelline,dc=net
emailAttr: mail
filter: (objectClass=person)
idAttr: sAMAccountName
nameAttr: cn
preferredUsernameAttr: sAMAccountName
username: sAMAccountName
usernamePrompt: username
id: ad
name: ActiveDirectory
type: ldap
enablePasswordDB: true
issuer: https://dex.strudelline.net
oauth2:
responseTypes:
- code
- token
- id_token
skipApprovalScreen: true
staticClients:
- id: dexdebug
name: Dex Debugger
redirectURIs:
- https://dexdebug.strudelline.net/auth/callback
secret: dexdebugSecret
- id: gitea
name: Dex Debugger
redirectURIs:
- https://git.strudelline.net/user/oauth2/werts/callback
secret: nUs1qeYWA7o3poJFM8gXJMQhwoMIA3py7go8lPEdWTNwZTXW5HnsxJMYSlolBbFt5OS5u3rUapwehGJ19opECR
- id: oa2p
name: oauth2proxy
redirectURIs:
- https://oidc.strudelline.net/be/callback
secret: oa2ptest
staticPasswords:
- email: test@strudelline.net
hash: $2a$10$2b2cU8CPhOTaGrs1HRQuAueS7JTT5ZHsHSzYiFPm1leZck7Mc8T4W
userID: 08a8684b-db88-4b73-90a9-3cd1661f5466
username: test
storage:
config:
inCluster: true
type: kubernetes
ingress:
enabled: true
hosts:
- host: dex.strudelline.net
paths:
- path: /
pathType: Prefix

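Once the release is up, the issuer can be sanity-checked through the standard OIDC discovery document:

curl -s https://dex.strudelline.net/.well-known/openid-configuration | jq -r .issuer
# expected: https://dex.strudelline.net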
dhcp-server/cm.yaml Normal file

@ -0,0 +1,40 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dnsmasq-etc
namespace: dhcp-server
data:
logdhcp.conf: log-dhcp
disable_dns.conf: port=53
dhcp_auth.conf: dhcp-authoritative
use_net1.conf: interface=net1
defaultgw.conf: dhcp-option=3,172.16.1.1
defaultdns.conf: dhcp-option=6,172.16.1.8
defaultntp.conf: dhcp-option=42,172.16.1.1
defaulttz.conf: dhcp-option=2,0xffffb9b0
domain_15.conf: dhcp-option=15,cascade.strudelline.net
tftp_grandstream.conf: dhcp-option=tag:grandstream,66,https://pbx.strudelline.net/app/provision
http_grandstream.conf: dhcp-option=tag:grandstream,160,https://pbx.strudelline.net/app/provision
mac_grandstream.conf: dhcp-mac=set:grandstream,00:0b:82
domain.conf: domain=cascade.strudelline.net
range_default.conf: dhcp-range=default,172.20.2.0,172.20.255.255,255.240.0.0,1h
range_clients.conf: dhcp-range=clients,172.17.0.0,static,255.240.0.0,4h
range_servers.conf: dhcp-range=servers,172.16.0.0,static,255.240.0.0,24h
range_cameras.conf: dhcp-range=cameras,172.28.1.1,172.28.1.255,255.240.0.0,1h
range_grandstream.conf: dhcp-range=grandstream,172.29.1.1,172.29.1.255,255.240.0.0,1h
server_001132c83aed.conf: dhcp-host=00:11:32:c8:3a:ed,id:*,net:servers,172.16.18.1,noctowl
server_0007324be4c2.conf: dhcp-host=00:07:32:4b:e4:c2,id:*,net:servers,172.16.61.1,api1
server_0007324e8913.conf: dhcp-host=00:07:32:4e:89:13,id:*,net:servers,172.16.62.1,api2
server_0007324bfcb3.conf: dhcp-host=00:07:32:4b:fc:b3,id:*,net:servers,172.16.63.1,api3
server_1c1b0d9d5649.conf: dhcp-host=1c:1b:0d:9d:56:49,id:*,net:servers,172.16.32.1,absol
server_008010ecaff4.conf: dhcp-host=00:80:10:ec:af:f4,id:*,net:servers,172.16.56.1,kirlia
server_021132293ca4.conf: dhcp-host=02:11:32:29:3c:a4,id:*,net:servers,172.16.55.1,home
client_706655342463.conf: dhcp-host=70:66:55:34:24:63,id:*,net:clients,172.17.19.100,19weewees
client_5cc5d4a718d1.conf: dhcp-host=5c:c5:d4:a7:18:d1,id:*,net:clients,172.17.3.100,mrs-bugwert
client_a483e7c51e2a.conf: dhcp-host=a4:83:e7:c5:1e:2a,id:*,net:clients,172.17.50.100,Jamess-MBP
#client_5414f3623aa4.conf: dhcp-host=54:14:f3:62:3a:a4,id:*,net:clients,172.17.19.101,wesley
client_5414f3623aa4.conf: dhcp-host=58:11:22:4c:5d:0f,id:*,net:clients,172.17.19.101,wesley
client_2c8db1976f99.conf: dhcp-host=2c:8d:b1:97:6f:99,id:*,net:clients,172.17.6.101,jonathan
client_dca632382c1e.conf: dhcp-host=dc:a6:32:38:2c:1e,id:*,net:servers,172.16.88.1,rpi4
client_b827eba2eec3.conf: dhcp-host=b8:27:eb:a2:ee:c3,id:*,net:servers,172.27.2.1,camera-1
client_9c8ecd3fc616.conf: dhcp-host=9c:8e:cd:3f:c6:16,id:*,net:cameras,172.28.2.2,camera-2


@ -0,0 +1,49 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: dhcp-server
name: dhcp-server
annotations:
reloader.stakater.com/auto: "true"
spec:
replicas: 1
selector:
matchLabels:
app: dhcp-server
strategy:
type: Recreate
template:
metadata:
labels:
app: dhcp-server
annotations:
k8s.v1.cni.cncf.io/networks: |
[{
"namespace": "cascade",
"name": "br0-static",
"ips": ["172.16.1.67/12"]
}]
spec:
containers:
- image: jamesandariese/alpine-dnsmasq:0.1.3
name: dnsmasq
env:
- name: TZ
value: America/Chicago
volumeMounts:
- name: dnsmasq-etc
mountPath: /etc/dnsmasq.d
- name: dnsmasq-data
mountPath: /data
securityContext:
capabilities:
add: ["NET_ADMIN"]
restartPolicy: Always
volumes:
- name: dnsmasq-etc
configMap:
name: dnsmasq-etc
- name: dnsmasq-data
persistentVolumeClaim:
claimName: dnsmasq-data

dhcp-server/ns.yaml Normal file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: dhcp-server

dhcp-server/pvc.yaml Normal file

@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: dnsmasq-data
namespace: dhcp-server
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
storageClassName: longhorn
volumeMode: Filesystem


@ -4,7 +4,7 @@ metadata:
name: homeassistant
namespace: external-services
spec:
-externalName: 172.25.194.19
+externalName: 172.16.55.1
type: ExternalName
ports:
- name: http
@ -17,7 +17,7 @@ metadata:
name: homeassistant
namespace: external-services
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
- host: home.strudelline.net
http:


@ -4,7 +4,7 @@ metadata:
name: minio-admin
namespace: external-services
spec:
-externalName: noctowl.cascade.strudelline.net
+externalName: 172.16.18.1
type: ExternalName
ports:
- name: http
@ -17,7 +17,7 @@ metadata:
name: minio-admin
namespace: external-services
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
- host: minio-admin.strudelline.net
http:
@ -29,7 +29,3 @@ spec:
name: minio-admin
port:
number: 58714
-tls:
-- hosts:
-- minio-admin.strudelline.net
-secretName: wildcard-tls


@ -4,7 +4,7 @@ metadata:
name: minio
namespace: external-services
spec:
-externalName: noctowl.cascade.strudelline.net
+externalName: 172.16.18.1
type: ExternalName
ports:
- name: http
@ -17,8 +17,18 @@ metadata:
name: minio
namespace: external-services
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
+- host: '*.minio.strudelline.net'
+http:
+paths:
+- path: /
+pathType: Prefix
+backend:
+service:
+name: minio
+port:
+number: 58713
- host: werts.us.minio.strudelline.net
http:
paths:
@ -39,7 +49,3 @@ spec:
name: minio
port:
number: 58713
-tls:
-- hosts:
-- minio.strudelline.net
-secretName: wildcard-tls


@ -4,7 +4,7 @@ metadata:
name: noctowl
namespace: external-services
spec:
-externalName: noctowl.cascade.strudelline.net
+externalName: 172.16.18.1
type: ExternalName
ports:
- name: http
@ -17,7 +17,7 @@ metadata:
name: noctowl
namespace: external-services
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
- host: noctowl.strudelline.net
http:
@ -29,7 +29,3 @@ spec:
name: noctowl
port:
number: 5000
-tls:
-- hosts:
-- noctowl.strudelline.net
-secretName: wildcard-tls


@ -1,35 +1,31 @@
apiVersion: v1
kind: Service
metadata:
-name: windmill
+name: plex
namespace: external-services
spec:
-externalName: noctowl.cascade.strudelline.net
+externalName: 172.16.18.1
type: ExternalName
ports:
- name: http
protocol: TCP
-port: 8444
+port: 32400
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
-name: windmill
+name: plex
namespace: external-services
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
-- host: windmill.strudelline.net
+- host: plex.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
-name: windmill
+name: plex
port:
-number: 8444
+number: 32400
-tls:
-- hosts:
-- windmill.strudelline.net
-secretName: wildcard-tls


@ -4,7 +4,7 @@ metadata:
name: webdav
namespace: external-services
spec:
-externalName: noctowl.cascade.strudelline.net
+externalName: 172.16.18.1
type: ExternalName
ports:
- name: http
@ -20,7 +20,7 @@ metadata:
ingress.kubernetes.io/config-backend: |
http-request set-header X-Real-IP %[src]
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
- host: webdav.strudelline.net
http:
@ -32,7 +32,3 @@ spec:
name: webdav
port:
number: 5005
-tls:
-- hosts:
-- webdav.strudelline.net
-secretName: wildcard-tls


@ -0,0 +1,21 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: factorio-com-caddr
namespace: factorio-servers
spec:
refreshInterval: "600s"
secretStoreRef:
name: bitwarden
kind: ClusterSecretStore
target:
name: factorio-com-caddr
data:
- secretKey: username
remoteRef:
key: 'Factorio'
property: username
- secretKey: token
remoteRef:
key: 'Factorio'
property: token


@ -0,0 +1,85 @@
apiVersion: batch/v1
kind: Job
metadata:
name: build-trt-models
namespace: frigate
spec:
parallelism: 1
completions: 1
backoffLimit: 6
completionMode: NonIndexed
template:
spec:
restartPolicy: OnFailure
runtimeClassName: nvidia
containers:
- name: builder
image: nvcr.io/nvidia/tensorrt:22.07-py3
command:
- bash
- /tensorrt_models.sh
env:
- name: USE_FP16
value: "False"
- name: YOLO_MODELS
value: yolov7-640
volumeMounts:
- name: trt-models
mountPath: /tensorrt_demos
subPath: tensorrt_demos
- name: trt-models
mountPath: /tensorrt_models
- name: tensorrt-build-models-script
mountPath: /tensorrt_models.sh
subPath: tensorrt_models.sh
volumes:
- name: trt-models
persistentVolumeClaim:
claimName: trt-models
- name: tensorrt-build-models-script
configMap:
name: tensorrt-build-models-script
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tensorrt-build-models-script
namespace: frigate
data:
tensorrt_models.sh: |
#!/bin/bash
set -euxo pipefail
CUDA_HOME=/usr/local/cuda
LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/cuda/lib64:/usr/local/cuda/extras/CUPTI/lib64
OUTPUT_FOLDER=/tensorrt_models
echo "Generating the following TRT Models: ${YOLO_MODELS:="yolov4-tiny-288,yolov4-tiny-416,yolov7-tiny-416"}"
# Create output folder
mkdir -p ${OUTPUT_FOLDER}
# Install packages
pip install --upgrade pip && pip install onnx==1.9.0 protobuf==3.20.3
if [ ! -d /tensorrt_demos/.git ];then
# Clone tensorrt_demos repo
git clone --depth 1 https://github.com/yeahme49/tensorrt_demos.git /tensorrt_demos
fi
# Build libyolo
cd /tensorrt_demos/plugins && make all
cp libyolo_layer.so ${OUTPUT_FOLDER}/libyolo_layer.so
# Download yolo weights
cd /tensorrt_demos/yolo && ./download_yolo.sh
# Build trt engine
cd /tensorrt_demos/yolo
for model in ${YOLO_MODELS//,/ }
do
python3 yolo_to_onnx.py -m ${model}
python3 onnx_to_tensorrt.py -m ${model}
cp /tensorrt_demos/yolo/${model}.trt ${OUTPUT_FOLDER}/${model}.trt;
done

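The Job is one-shot and the engine builds are slow, so the usual pattern is to follow the logs and wait for completion before pointing Frigate's detector at the PVC:

kubectl -n frigate logs -f job/build-trt-models
kubectl -n frigate wait --for=condition=complete job/build-trt-models --timeout=2h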
frigate/deploy.sh Normal file

@ -0,0 +1,12 @@
helm repo add blakeblackshear https://blakeblackshear.github.io/blakeshome-charts/
kubectl apply -f pvc.yaml
helm upgrade --install \
-n frigate \
--create-namespace \
frigate \
blakeblackshear/frigate \
-f values.yaml

frigate/diff.sh Normal file

@ -0,0 +1,10 @@
helm repo add blakeblackshear https://blakeblackshear.github.io/blakeshome-charts/ && helm repo update
kubectl diff -f pvc.yaml
helm diff upgrade \
-n frigate \
frigate \
blakeblackshear/frigate \
-f values.yaml

frigate/ingress.yaml Normal file

@ -0,0 +1,26 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: frigate
namespace: frigate
spec:
ingressClassName: haproxy
rules:
- host: frigate.strudelline.net
http:
paths:
#- path: /oauth2
- path: /
pathType: Prefix
backend:
service:
name: oauth2-proxy
port:
name: http
#- path: /
# pathType: Prefix
# backend:
# service:
# name: frigate
# port:
# number: 5000


@ -0,0 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "mqtt-broker",
"namespace": "frigate",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "mqtt-broker",
"namespace": "frigate",
"creationTimestamp": null
},
"type": "Opaque"
},
"encryptedData": {
"FRIGATE_MQTT_PASSWORD": "AgBtScnU49NZjv+310TI1n61bRCDR+VJF/abFsvlQAUoLMJUrTj2t6C7ifXduO2eMjHxvYJIY5we8FUpodqAfOm8rMq+WYtrRBF7dmYkYgcp+vQnTPc8rm0MU7YFdtF6kUKPNvIjIRzrAS9tFZidJGZqGRJ7jpL9KB7tUQuTAz4bZRmH8kCqW6U9vihxNavhp2qCtsthafGX6zCkngJdWzRmN7aGzlVo/5+td1QQsra3/NScCJN5PS2FFeOImNGnKyBw993ki0K4wXwyAnzK0WXqpSGBAhLVUXZeUYouu6jAhUj6buQ56haG7I0Rc0IkZwckPzTwt2lhX8MeE9nhcggl/CqcJF9WGh1iXWYS/I33lWc286yoJ/3nKS8J5m+17u9idFiYwURlM9rDxR74NtUW0mVjieNvm0Log9uevg1TzGCzhwEfNS117atYhzYSdEPrmd+hYcwD7DXM4HgeA6+bstyJgprjhRaeHlOgTPwwypNgeNWCe+GtAXQJzQIfpu97B4FunMHKw8ucdX+EoOFbdBGSh1DC+KfsP4Vi75VxplIA+Qk/Xy7cvIilcrlNuZ9MwGxAUHlAyt6PMoJ/+61a/EfJKrlFPgRIWJ3LigjS61Z1eyVseQZm2oB+xQwpqGRwctEI1LMo1sWJWVBrCw+wsFpuQCcN1NWZ9+99FOVmUXu/9lRNB4Yeh3b8Q6k+1Be3NKIiLURJ",
"FRIGATE_MQTT_USER": "AgB7hTaMioXG0wtF27hMoqT7hCMkTFbdJ4L6VSel5xbSOmtojwEbUXGIWP50jSUDwFGQbPxDAGHRGwgjVy0AsVtTDQfhmEEbBqbU43FPESwqDxzCbYPWlJ8K0wcGDQe4PYDg3sYJpO9qF4MGjVaVSN4tAbvbYMDTCCmhMAQ3mJef6EpXoMUmgWUCeBxahst3XlfX2mjJTwgXIQ0FloUTZDxMjdXDkkZBKq6VLBV6csp1uR6NN65otmAai371OPgzTxkI8gZVGsJ5/dkkiisdYzytNaT6GL8qnJp9szx3yh3S4ry3LxWAmT9DwPypmMw0L1zM81AKZZuB3zzAQfRw04X3QGsMFVxCiLjkwooPOD1S73lfpSdXE10z5JWS7mnB6TtIdQtVXbsVSeCUpsodyyFcEdDxHXK9zGxPBItvx6zTrWMVFUqAeGuIeBiIeC69Zvn7IAJCn6D/ePOeIW4Cfcm/3N2NLWz8FkKmwiBIAT/DEm9lFRpIQJWyLze1tnhyAr4YkMECHtwlHF3F5AyovKYFTcLZlHBoEg0iqpQOjiSZQg9aHB03nNJIZH7raPfaf2ZqbiOJlL+hENNs3Ggu80EKi56uDqlvLbXchhEDFthmgQrv/F4M6T+XtsxaLj/9YN4C/tHK0EDaI4XzqdAN3HaZUsfklx/P7kMFW/t67XpuhvfW4AgRL5tM0zyOpjZQRJ1kPFdfFw=="
}
}
}

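For reference, SealedSecret blobs like this are generated client-side and are only decryptable by the in-cluster sealed-secrets controller, which is what makes them safe to commit. A sketch of how one is (re)produced, with placeholder values:

kubectl -n frigate create secret generic mqtt-broker \
  --from-literal=FRIGATE_MQTT_USER=changeme \
  --from-literal=FRIGATE_MQTT_PASSWORD=changeme \
  --dry-run=client -o json | kubeseal --format json > mqtt-broker.json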

@ -0,0 +1,22 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "oauth2-proxy",
"namespace": "frigate",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "oauth2-proxy",
"namespace": "frigate",
"creationTimestamp": null
},
"type": "Opaque"
},
"encryptedData": {
"cookie_secret": "AgBLbPOphYpZdC8lgB6Oz3VmzMvKuIT65zk8kalU1Hxabv/zvEf8EuvB7AfACznVmRukxjZX2rCYVwGZ/I2DGXqeSr6BhN/6/YHuTLUv6vNCSL2uuBh1sBV1HIBtuZVM1RW06bVzSD/AuhHTq8on6UG2WYRiRGfTWSr3ByfjjQsMdQ7HFSUCZlLxtijjdC/nGI9dvOOHcDwl4A1S4jhAuBIU5hs/kb2/q0f4QABrc+CIb+6kDCaRyaXNKC+/PUH1bGROexGTWL6hzBw3FU8EVkdxyZEZ+a4Z/CmkwSQRjLVS57UQCMPy3J8/vTtMfFdwfBnXnDYP9STVDyg5nudOxBkxc/+NVqRpMThimKsLCA/wGaHV2oPJvtLMAILUPeHpdoxX475Bapv0ZyNkABKvKYbZyO3CSE1fcHBl14A0K2JXC0VUjHEmEcuomPe667MMicbUhaiRWlv1Q+U5DeodII8UNIqdXOKBTzRGt4tx7RWTE8aqudRMIm9x9fYsOwc0sa6V3WTZtvUZyVt3KEu6c2I4OvIz/uBBvUm3zcLvJ9c38hhKYYUCsyqkYpgvwiS+wfFO3/7K4mK7ca61xUUHnNhxU8UAyox2ogYzcTSnRAAVSrBk81w8rsnW5sNuaHrnH17kh17GXvP5tccLphngtA7BdzTuKQTRTjl1vwv8R0+rLNyQJSbRMG2BAvSRET8xfWnfs3TeiACfv/82InHA8e3dsQmRRknEH69Iev1VsOKzQBtStlXhx25wQ7woMw=="
}
}
}

frigate/oauth2-proxy.yaml Normal file

@ -0,0 +1,220 @@
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: oauth2-proxy
name: oauth2-proxy
namespace: frigate
spec:
replicas: 1
selector:
matchLabels:
app: oauth2-proxy
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: oauth2-proxy
spec:
initContainers:
- name: password-creator
image: httpd:alpine3.19
command:
- /usr/local/apache2/bin/htpasswd
- -Bbc
- /xfr/htpasswd
- "$(OIDC_BYPASS_USERNAME)"
- "$(OIDC_BYPASS_PASSWORD)"
envFrom:
- secretRef:
name: oidc-bypass-user
volumeMounts:
- name: htpasswd-xfr
mountPath: /xfr
containers:
- name: oauth2-proxy-http
image: quay.io/oauth2-proxy/oauth2-proxy:v7.4.0
imagePullPolicy: IfNotPresent
env:
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-client
key: client_id
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-client
key: client_secret
- name: OAUTH2_PROXY_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy
key: cookie_secret
- name: OAUTH2_PROXY_UPSTREAMS
value: http://frigate:5000
args:
- --http-address=0.0.0.0:4180
- --whitelist-domain=strudelline.net:*
- --whitelist-domain=.strudelline.net:*
- --cookie-domain=strudelline.net
- --email-domain=werts.us
- --email-domain=strudelline.net
- --email-domain=andariese.net
- --cookie-secure
- --skip-provider-button
- --htpasswd-file=/xfr/htpasswd
- --set-xauthrequest
- --provider=oidc
- --oidc-issuer-url=https://auth.werts.us/realms/werts
- --trusted-ip=172.16.0.0/16
- --cookie-csrf-per-request
volumeMounts:
- name: htpasswd-xfr
mountPath: /xfr
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
port: http
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- containerPort: 4180
name: http
protocol: TCP
- name: oauth2-proxy-https
image: quay.io/oauth2-proxy/oauth2-proxy:v7.4.0
imagePullPolicy: IfNotPresent
env:
- name: OAUTH2_PROXY_CLIENT_ID
valueFrom:
secretKeyRef:
name: oidc-client
key: client_id
- name: OAUTH2_PROXY_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: oidc-client
key: client_secret
- name: OAUTH2_PROXY_COOKIE_SECRET
valueFrom:
secretKeyRef:
name: oauth2-proxy
key: cookie_secret
- name: OAUTH2_PROXY_UPSTREAMS
value: http://frigate:5000
args:
- --https-address=0.0.0.0:4443
- --tls-cert-file=/certs/tls.crt
- --tls-key-file=/certs/tls.key
- --whitelist-domain=strudelline.net:*
- --whitelist-domain=.strudelline.net:*
- --cookie-domain=strudelline.net
- --email-domain=werts.us
- --email-domain=strudelline.net
- --email-domain=andariese.net
- --cookie-secure
- --skip-provider-button
- --htpasswd-file=/xfr/htpasswd
- --set-xauthrequest
- --provider=oidc
- --oidc-issuer-url=https://auth.werts.us/realms/werts
- --trusted-ip=172.16.0.0/16
- --cookie-csrf-per-request
volumeMounts:
- name: htpasswd-xfr
mountPath: /xfr
- name: certs
mountPath: /certs
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
port: https
scheme: HTTPS
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- containerPort: 4443
name: https
protocol: TCP
volumes:
- name: htpasswd-xfr
emptyDir:
medium: Memory
sizeLimit: 5Mi
- name: certs
secret:
secretName: wildcard-tls
terminationGracePeriodSeconds: 2
---
apiVersion: v1
kind: Service
metadata:
labels:
app: oauth2-proxy
annotations:
metallb.universe.tf/allow-shared-ip: 172.16.17.33
metallb.universe.tf/loadBalancerIPs: 172.16.17.33
name: oauth2-proxy
namespace: frigate
spec:
type: LoadBalancer
externalTrafficPolicy: Local
internalTrafficPolicy: Local
ports:
- name: http-redirect
port: 80
protocol: TCP
targetPort: 4180
- name: https-frigate
port: 443
protocol: TCP
targetPort: 4443
- name: http-frigate
port: 5000
protocol: TCP
targetPort: 4180
- name: http
port: 4180
protocol: TCP
targetPort: 4180
selector:
app: oauth2-proxy
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/instance: frigate
app.kubernetes.io/name: frigate
annotations:
metallb.universe.tf/allow-shared-ip: 172.16.17.33
metallb.universe.tf/loadBalancerIPs: 172.16.17.33
name: frigate-lb
namespace: frigate
spec:
type: LoadBalancer
externalTrafficPolicy: Local
internalTrafficPolicy: Local
ports:
- name: rtmp
port: 1935
protocol: TCP
targetPort: 1935
- name: restream
port: 8554
protocol: TCP
targetPort: 8554
selector:
app.kubernetes.io/instance: frigate
app.kubernetes.io/name: frigate

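The htpasswd file written by the init container gives oauth2-proxy one basic-auth identity that bypasses OIDC, which is how non-browser clients reach Frigate through the proxy. A hedged check against the plain-HTTP listener exposed on port 5000 of the LoadBalancer, using the credentials from the oidc-bypass-user secret:

curl -s -u "$OIDC_BYPASS_USERNAME:$OIDC_BYPASS_PASSWORD" \
  http://172.16.17.33:5000/api/version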

@ -0,0 +1,23 @@
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "oidc-bypass-user",
"namespace": "frigate",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "oidc-bypass-user",
"namespace": "frigate",
"creationTimestamp": null
},
"type": "Opaque"
},
"encryptedData": {
"OIDC_BYPASS_PASSWORD": "AgAb60RtJhGd4lcb0UQSn37Rc5WYEcnuq/wTcc7+nwprl9h8kJFo2muGioik7RLlwbpEstdWQ/e1J6KZZ1WoCxXe/W5Etvc0nCaHLOCCdxrH8V+jypLiaOHQVfwYWpdVBcxjWsY2NY0M3pe15dZRot5uBoZBGgr7kkrp4V4/gdiK9WswrhQpSCHWgMrmc7/yVHIipOsbOOzGVxIO/LBVHz4Xb+0h10gSAv5morWkj9eiXY+bGb7Ujhy0bzzYtGX1i2ioPErCbIthBCQtVVuLHi6PkrOrc7DV9AcSrOTrQYiV2cT15nYGp0uhGMa9b4tGuyhRbP6uHl3FmT+KVB6OosUGX67MJorptCfRiZ77c74IBu/3KMOAqA/EMTczoawUevxqXvob6O3qoRvQFUWHkXq+DjeQ2iqvuJDjyIWsGrbOO0hmElhdBjHSeQeJUoTb/MnfUAGMtY/G2Aywn67O1PLwWvXJz7V3sB6wBQBUp1+nWpqyx0E+MYD0x1APSemOV+VnHAm4lCAurE2GAFy+5Q2Ve/cbcUc1kkYQ+FWKf0wUPeG+aZ062QCQEVZ1Tb0NI/6jJIoijpt1EgEgQ0FZOj+GzEGQ5FrP1Pi/tL2WQCi27MCuWbQUnfhX0ynVDR5A1Av4euwyJgD8nwHPQ9wqjqjpJ/cymmBV10Smlx0x0u1JvwrwurJGKnG4D52FHJC7WiqBsF8vybDcjb5f2JvUCDPpWbBIhNQcjS0jHQI0jp4mdQ==",
"OIDC_BYPASS_USERNAME": "AgCKMh79ai7hOhnT+92hnUXokvQKVBtjI3LLYIolXQLkcd7IVbtnRr2zS/fayQquWO2VefDBtKVW98Uq2yRFttYpAx6TyRFKz8Etvr4h0QSF7MyzJ1CB6ypR73WIvDRbAak4PuQstj02x7L1p4f3Xs4lr/RxB5VATA2rpk90uGgyjUKpA5mssbk1ghwC8b9c1DD7/B/ZwHE4ozjFDBS/Lrh2bMxNwKuQlJ2Ra3HlAGWQqCZ/A9DKKCpUnlhh8SLDE7r0aCIFBM4wyWdG1LWwVkaFpLP8hHdWhUyH9rtNCKhAUBYxpGwIC2XJaXvbm/bndcHlRUzrOAnoaXh69g/WxBcWAT/kCMkWFTFZfVPb2svlRgpNoD+srjXZqplOqLenAQAP3yPH1wDDQCm9XUZDycVKAdfWJsiMI3+/Y6YFUY/fysPcn5uw8+COfa1D4HV/bBVTD22V9BsF4kfVA5UXy6y6coFOs5UzODKgCrtp6KoOnU6/J7MpjEN57H0+uTW0rJHyw5L9Qiwg/wRKgDtfzx9fWcElkkDV2BSipi/tDxVA53WwtqHDcHxVYxg5arx0JzS/IbYNEPYhS2yXnrmnQFejle+pLKhqWRoE1892iiaUYyCdivy6MogURpsPzX/891Qfe0RPg8Du/I484m50W1pUb/w36c6CJy6xI4WZ73gtp1pey/Uy6sWcszJKeHLFdXhZr3DOAXQ="
}
}
}

frigate/pvc.yaml Normal file

@ -0,0 +1,15 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: trt-models
namespace: frigate
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 50Gi
storageClassName: longhorn
volumeMode: Filesystem

frigate/values.yaml Normal file

@ -0,0 +1,97 @@
image:
tag: "0.12.0-tensorrt"
envFromSecrets:
- mqtt-broker
- rtsp-secret
config: |
ffmpeg:
hwaccel_args: preset-nvidia-h264
mqtt:
enabled: True
user: "{FRIGATE_MQTT_USER}"
password: "{FRIGATE_MQTT_PASSWORD}"
port: 1883
host: 172.16.17.83
record:
enabled: True
events:
retain:
default: 10
retain:
days: 7
mode: motion
cameras:
cammy: # <------ Name the camera
ffmpeg:
inputs:
- path: "rtsp://{FRIGATE_RTSP_USER}:{FRIGATE_RTSP_PASSWORD}@172.28.2.2:554"
roles:
- record
- path: "rtsp://{FRIGATE_RTSP_USER}:{FRIGATE_RTSP_PASSWORD}@172.28.2.2:554/cam/realmonitor?channel=1&subtype=0"
roles:
- detect
detect:
enabled: False # <---- disable detection until you have a working camera feed
width: 640 # <---- update for your camera's resolution
height: 480 # <---- update for your camera's resolution
detectors:
tensorrt:
type: tensorrt
device: 0 #This is the default, select the first GPU
model:
path: /trt-models/yolov7-640.trt
input_tensor: nchw
input_pixel_format: rgb
width: 640
height: 640
service:
type: ClusterIP
gpu:
nvidia:
enabled: true
runtimeClassName: nvidia
ingress:
enabled: false
hosts:
- host: frigate.strudelline.net
paths:
- /
extraVolumeMounts:
- name: trt-models
mountPath: /trt-models
- name: data
mountPath: /media
subPath: media
- name: cctv-synology
mountPath: /media/frigate/clips
subPath: clips
- name: cctv-synology
mountPath: /media/frigate/recordings
subPath: recordings
extraVolumes:
- name: trt-models
persistentVolumeClaim:
claimName: trt-models
- name: cctv-synology
nfs:
server: 172.16.18.1
path: /volume1/cctv/frigate
persistence:
data:
enabled: true
skipuninstall: true
size: 100Gi

frigate/wildcard-tls.yaml Normal file

@ -0,0 +1,74 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
namespace: wildcard-tls
name: wildcard-tls-reader
rules:
- apiGroups: [""]
resources:
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- authorization.k8s.io
resources:
- selfsubjectrulesreviews
verbs:
- create
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: wildcard-tls-sa
namespace: frigate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: wildcard-tls-reader-from-frigate
namespace: wildcard-tls
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: wildcard-tls-reader
subjects:
- kind: ServiceAccount
name: wildcard-tls-sa
namespace: frigate
---
apiVersion: external-secrets.io/v1beta1
kind: SecretStore
metadata:
name: wildcard-tls
namespace: frigate
spec:
provider:
kubernetes:
# with this, the store is able to pull only from the `wildcard-tls` namespace
remoteNamespace: wildcard-tls
server:
caProvider:
type: ConfigMap
name: kube-root-ca.crt
key: ca.crt
auth:
serviceAccount:
name: "wildcard-tls-sa"
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: wildcard-tls
namespace: frigate
spec:
refreshInterval: 1h
secretStoreRef:
kind: SecretStore
name: wildcard-tls
target:
name: wildcard-tls
dataFrom:
- extract:
key: wildcard-tls

fusionpbx/ingress.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: pbx
namespace: fusionpbx
spec:
ingressClassName: haproxy
rules:
- host: pbx.werts.us
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: pbx-untls-shim
port:
number: 80

fusionpbx/ns.yaml Normal file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: fusionpbx
spec: {}
status: {}

fusionpbx/pvc.yaml Normal file

@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: fusionpbx-root
namespace: fusionpbx
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 40Gi
storageClassName: ssd
volumeMode: Filesystem

fusionpbx/untls-shim.yaml Normal file

@ -0,0 +1,107 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: "pbx-untls-shim"
namespace: "fusionpbx"
data:
haproxy.cfg: |
global
log stdout format raw local0
stats timeout 30s
user haproxy
group haproxy
defaults
log global
mode http
option httplog
option dontlognull
balance source
timeout connect 5000
timeout client 50000
timeout server 50000
http-reuse never
option disable-h2-upgrade
frontend http80
bind *:80
http-request capture req.hdr(Host) len 255
default_backend httpnodes
backend httpnodes
option forwardfor
http-request add-header x-forwarded-proto https
server s1 172.16.56.1:443 ssl verify none check
frontend stats
mode http
option httplog
bind *:8404
http-request capture req.hdr(X-Forwarded-For) len 64
http-request capture req.hdr(Host) len 255
stats enable
stats uri /
stats refresh 10s
stats admin if LOCALHOST
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: "pbx-untls-shim"
namespace: "fusionpbx"
annotations:
"reloader.stakater.com/auto": "true"
spec:
replicas: 1
selector:
matchLabels:
app: "pbx-untls-shim"
strategy:
type: RollingUpdate
template:
metadata:
labels:
app: "pbx-untls-shim"
spec:
containers:
- image: haproxy:latest
name: haproxy
volumeMounts:
- mountPath: /usr/local/etc/haproxy/haproxy.cfg
name: config
subPath: haproxy.cfg
ports:
- containerPort: 80
name: http
protocol: TCP
- containerPort: 8404
name: stats
protocol: TCP
restartPolicy: Always
volumes:
- name: config
configMap:
name: "pbx-untls-shim"
---
apiVersion: v1
kind: Service
metadata:
name: "pbx-untls-shim"
namespace: "fusionpbx"
spec:
ipFamilies:
- IPv4
ipFamilyPolicy: SingleStack
ports:
- name: http-80
port: 80
protocol: TCP
targetPort: http
- name: https-8404
port: 8404
protocol: TCP
targetPort: stats
selector:
app: "pbx-untls-shim"
type: ClusterIP

fusionpbx/update-tls.sh Executable file

@ -0,0 +1,3 @@
kubectl secretdata -n wildcard-tls wildcard-tls | yq -o json '.[][]["tls.key"]' | jq -r . | ssh 172.16.56.1 sudo sponge /etc/ssl/private/nginx.key
kubectl secretdata -n wildcard-tls wildcard-tls | yq -o json '.[][]["tls.crt"]' | jq -r . | ssh 172.16.56.1 sudo sponge /etc/ssl/certs/nginx.crt
ssh 172.16.56.1 sudo systemctl restart nginx

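(`sponge` here comes from moreutils on the remote host.) After the nginx restart, the served certificate can be spot-checked with plain openssl:

echo | openssl s_client -connect 172.16.56.1:443 -servername pbx.werts.us 2>/dev/null \
  | openssl x509 -noout -subject -enddate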
fusionpbx/vm.yaml Normal file

@ -0,0 +1,58 @@
---
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
name: fusionpbx
namespace: fusionpbx
spec:
running: true
template:
spec:
terminationGracePeriodSeconds: 3
domain:
cpu:
model: Westmere
cores: 2
threads: 1
sockets: 1
resources:
requests:
cpu: 1000m
memory: 1G
devices:
interfaces:
- name: br0
bridge: {}
macAddress: 00:80:10:ec:af:f4
model: virtio
disks:
#- name: iso
# disk:
# bus: virtio
- name: root
disk:
bus: virtio
networks:
- name: br0
multus:
networkName: cascade/br0
volumes:
- persistentVolumeClaim:
claimName: fusionpbx-root
name: root
#- dataVolume:
# name: debian-iso
# name: iso
#dataVolumeTemplates:
#- metadata:
# name: debian-iso
# spec:
# pvc:
# accessModes:
# - ReadWriteOnce
# resources:
# requests:
# storage: 1Gi
# source:
# http:
# url: https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.1.0-amd64-netinst.iso

gitea/email-secret.yaml Normal file

@ -0,0 +1,39 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: email-secret
namespace: gitea
spec:
secretStoreRef:
kind: ClusterSecretStore
name: bitwarden
data:
- remoteRef:
key: gmail app password (gitea)
property: password
secretKey: GITEA__mailer__PASSWD
- remoteRef:
key: gmail app password (gitea)
property: username
secretKey: GITEA__mailer__USER
- remoteRef:
key: gmail app password (gitea)
property: from
secretKey: GITEA__mailer__FROM
- remoteRef:
key: gmail app password (gitea)
property: port
secretKey: GITEA__mailer__SMTP_PORT
- remoteRef:
key: gmail app password (gitea)
property: host
secretKey: GITEA__mailer__SMTP_ADDR
refreshInterval: 5m
target:
creationPolicy: Owner
deletionPolicy: Delete
name: email-secret
template:
mergePolicy: "Merge"
data:
GITEA__mailer__ENABLED: "true"

gitea/gitea-secrets.yaml Normal file

@ -0,0 +1,34 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: gitea-secrets
namespace: gitea
spec:
secretStoreRef:
kind: ClusterSecretStore
name: bitwarden
data:
- remoteRef:
key: gitea secrets
property: GITEA__security__SECRET_KEY
secretKey: GITEA__security__SECRET_KEY
- remoteRef:
key: gitea secrets
property: GITEA__oauth2__JWT_SECRET
secretKey: GITEA__oauth2__JWT_SECRET
- remoteRef:
key: gitea secrets
property: GITEA__security__INTERNAL_TOKEN
secretKey: GITEA__security__INTERNAL_TOKEN
- remoteRef:
key: gitea secrets
property: GITEA__server__LFS_JWT_SECRET
secretKey: GITEA__server__LFS_JWT_SECRET
refreshInterval: 5m
target:
creationPolicy: Owner
deletionPolicy: Delete
name: gitea-secrets
template:
mergePolicy: "Merge"
data: {}


@ -5,7 +5,7 @@ metadata:
name: gitea
namespace: gitea
spec:
-ingressClassName: istio
+ingressClassName: haproxy
rules:
- host: git.strudelline.net
http:
@ -17,7 +17,3 @@ spec:
name: gitea
port:
name: gitea
-tls:
-- hosts:
-- git.strudelline.net
-secretName: wildcard-tls


@ -6,6 +6,8 @@ metadata:
app: gitea
name: gitea
namespace: gitea
+annotations:
+"reloader.stakater.com/auto": "true"
spec:
podManagementPolicy: OrderedReady
replicas: 1
@ -24,16 +26,15 @@ spec:
spec:
containers:
- name: gitea
-image: gitea/gitea:1.20.2
+image: gitea/gitea:1.21.2
-env:
-- name: GITEA__actions__ENABLED
-value: "true"
-- name: MINIO__server__ROOT_URL
-value: https://git.strudelline.net/
-- name: MINIO__server__DOMAIN
-value: git.strudelline.net
-- name: GITEA__actions__DEFAULT_ACTIONS_URL
-value: https://github.com
+envFrom:
+- configMapRef:
+name: gitea-config
+- secretRef:
+name: gitea-secrets
+- secretRef:
+name: email-secret
livenessProbe:
httpGet:
path: /api/healthz

gost-dns/deployment.yaml Normal file

@ -0,0 +1,45 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: gost-dns
name: gost-dns
spec:
replicas: 1
selector:
matchLabels:
app: gost-dns
template:
metadata:
labels:
app: gost-dns
annotations:
k8s.v1.cni.cncf.io/networks: |
[{
"namespace": "cascade",
"name": "br0-static",
"ips": ["172.16.1.53/12"]
}]
spec:
containers:
- image: ginuerzh/gost:latest
imagePullPolicy: IfNotPresent
name: gost
command:
- gost
- -L
- dns://:53?mode=tcp&dns=https://1.1.1.3/dns-query
- -L
- dns://:53?mode=udp&dns=https://1.1.1.3/dns-query
- -L
- dns://:54?mode=tcp&dns=https://doh.cleanbrowsing.org/doh/family-filter/
- -L
- dns://:54?mode=udp&dns=https://doh.cleanbrowsing.org/doh/family-filter/
- -L
- dns://:153?mode=tcp&dns=https://1.1.1.1/dns-query
- -L
- dns://:153?mode=udp&dns=https://1.1.1.1/dns-query
#securityContext:
# capabilities:
# add: ["NET_ADMIN"]
restartPolicy: Always

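Each listener pair above maps a port to an upstream policy: 53 forwards to 1.1.1.3 (Cloudflare's filtering resolver), 54 to CleanBrowsing's family filter, and 153 to unfiltered 1.1.1.1 (the target of the myrunningman.com bypass in coredns/values.yaml). Quick spot-checks with dig:

dig +short @172.16.1.53 example.com          # default, filtered
dig +short -p 54 @172.16.1.53 example.com    # family filter
dig +short -p 153 @172.16.1.53 example.com   # unfiltered bypass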
gost-dns/ns.yaml Normal file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: gost-dns

83
grist/deploy.yaml Normal file
View File

@ -0,0 +1,83 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: grist
name: grist
namespace: grist
spec:
replicas: 1
selector:
matchLabels:
app: grist
template:
metadata:
labels:
app: grist
spec:
containers:
- name: grist
image: gristlabs/grist:latest
env:
- name: PORT
value: "8080"
- name: GRIST_SANDBOX_FLAVOR
value: gvisor
- name: GRIST_FORCE_LOGIN
value: "true"
- name: APP_HOME_URL
value: https://grist.strudelline.net
- name: GRIST_SINGLE_ORG
value: docs
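        # The oauth2-proxy sidecar below authenticates each request and passes
        # the signed-in user's email upstream in this header.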
- name: GRIST_FORWARD_AUTH_HEADER
value: X-Forwarded-Email
#- name: GRIST_FORWARD_AUTH_LOGIN_PATH
# value: /oauth2/sign_in
- name: GRIST_FORWARD_AUTH_LOGOUT_PATH
value: /oauth2/sign_out
- name: GRIST_SESSION_SECRET
valueFrom:
secretKeyRef:
name: grist-session-secret
key: password
ports:
- containerPort: 8080
name: http
protocol: TCP
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /persist
name: grist-persist
- name: oauth2-proxy
image: quay.io/oauth2-proxy/oauth2-proxy:v7.4.0
args:
- --http-address=0.0.0.0:4180
- --config=/config.cfg
ports:
- containerPort: 4180
name: http
protocol: TCP
volumeMounts:
- mountPath: /config.cfg
name: oauth2-proxy-config
subPath: oauth2-proxy.cfg
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /ping
port: http
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
restartPolicy: Always
volumes:
- name: grist-persist
persistentVolumeClaim:
claimName: grist-persist
- name: oauth2-proxy-config
secret:
optional: false
secretName: oidc-secret


@ -0,0 +1,16 @@
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: grist-session-secret
namespace: grist
spec:
refreshInterval: "720h"
target:
name: grist-session-secret
dataFrom:
- sourceRef:
generatorRef:
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
name: quasi-base64

grist/ingress.yaml Normal file

@ -0,0 +1,35 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: grist
namespace: grist
spec:
ingressClassName: haproxy
rules:
- host: grist.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: grist
port:
number: 4180
---
apiVersion: v1
kind: Service
metadata:
labels:
app: grist
name: grist
namespace: grist
spec:
selector:
app: grist
ports:
- name: http
port: 4180
protocol: TCP
targetPort: 4180

grist/ns.yaml Normal file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: null
name: grist
spec: {}
status: {}

grist/oidc-secret.yaml Normal file

@ -0,0 +1,54 @@
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: oidc-secret
namespace: grist
spec:
secretStoreRef:
kind: ClusterSecretStore
name: bitwarden
data:
- remoteRef:
key: oidc client - grist
property: password
secretKey: client_secret
- remoteRef:
key: oidc client - grist
property: username
secretKey: client_id
- remoteRef:
key: oidc client - grist
property: cookie-secret
secretKey: cookie_secret
refreshInterval: 5m
target:
creationPolicy: Owner
deletionPolicy: Delete
name: oidc-secret
template:
data:
oauth2-proxy.cfg: |
cookie_secret='{{ .cookie_secret }}'
cookie_domains=['werts.us','strudelline.net']
whitelist_domains=['.werts.us','.strudelline.net','strudelline.net','werts.us']
# only users with this domain will be let in
email_domains=["werts.us","strudelline.net","andariese.net"]
client_id="{{ .client_id }}"
client_secret="{{ .client_secret }}"
cookie_secure="true"
upstreams = [ "http://localhost:8080" ]
#skip_auth_routes = [
# "!=^/admin(/.*)?$"
#]
skip_provider_button = true
reverse_proxy = true
set_xauthrequest = true
provider="oidc"
oidc_issuer_url="https://auth.werts.us/realms/werts"
type: Opaque

grist/pvc.yaml Normal file

@ -0,0 +1,14 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grist-persist
namespace: grist
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 10Gi
storageClassName: longhorn
volumeMode: Filesystem


@ -0,0 +1,13 @@
---
apiVersion: generators.external-secrets.io/v1alpha1
kind: Password
metadata:
name: quasi-base64
namespace: grist
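# Generates a 32-character secret from letters, 5 digits, and one of "-_",
# close to the base64url alphabet, hence the name. Consumed by the
# grist-session-secret ExternalSecret above.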
spec:
length: 32
digits: 5
symbols: 1
symbolCharacters: "-_"
noUpper: false
allowRepeat: true

harbor/deploy.sh Normal file

@ -0,0 +1,2 @@
helm repo add harbor https://helm.goharbor.io && helm repo update
helm upgrade -i -n harbor --create-namespace harbor harbor/harbor -f values.yaml

harbor/diff.sh Normal file

@ -0,0 +1 @@
helm diff upgrade -n harbor harbor harbor/harbor -f values.yaml

harbor/values.yaml Normal file

@ -0,0 +1,19 @@
externalURL: https://harbor.strudelline.net
expose:
type: loadBalancer
tls:
enabled: true
certSource: secret
secret:
secretName: wildcard-tls
loadBalancer:
ports:
httpPort: 80
httpsPort: 443
IP: 172.16.17.115
persistence:
persistentVolumeClaim:
jobservice:
jobLog:
accessMode: ReadWriteMany

harbor/wildcard-tls.yaml Normal file

@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: wildcard-tls
namespace: harbor
spec:
secretName: wildcard-tls
issuerRef:
name: zerossl
kind: ClusterIssuer
dnsNames:
- strudelline.net
- '*.strudelline.net'

illa/deployment.yaml Normal file

@ -0,0 +1,41 @@
---
# illa-builder deployment
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: illa
name: illa-builder
spec:
selector:
matchLabels:
app.kubernetes.io/name: illa-builder
replicas: 1
template:
metadata:
labels:
app.kubernetes.io/name: illa-builder
spec:
containers:
- image: docker.io/illasoft/illa-builder:latest
imagePullPolicy: Always
name: illa-builder
ports:
- containerPort: 2022
env:
- name: ILLA_DEPLOY_MODE
value: "self-host"
---
# illa-builder service
apiVersion: v1
kind: Service
metadata:
namespace: illa
name: illa-builder
spec:
ports:
- port: 2022
targetPort: 2022
protocol: TCP
type: NodePort
selector:
app.kubernetes.io/name: illa-builder

illa/ingress.yaml Normal file

@ -0,0 +1,18 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: illa
namespace: illa
spec:
ingressClassName: haproxy
rules:
- host: illa.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: illa-builder
port:
number: 2022


@ -38,20 +38,35 @@ data:
     frontend https443
       bind *:443 ssl crt /ssl-tmp/tls.pem
       http-request capture req.hdr(Host) len 255
+      http-request set-header X-Forwarded-Proto https
       http-response replace-value Location http(://.*[.]werts[.]us/.*) https\1
       http-response replace-value Location http(://.*[.]strudelline[.]net/.*) https\1
       default_backend httpnodes
+    frontend rtmp1935
+      bind *:1935
+      mode tcp
+      default_backend wertube1935
     frontend proxy4443
       bind *:4443 ssl crt /ssl-tmp/tls.pem accept-proxy
       http-request capture req.hdr(Host) len 255
+      http-request set-header X-Forwarded-Proto https
       http-response replace-value Location http(://.*[.]werts[.]us/.*) https\1
       http-response replace-value Location http(://.*[.]strudelline[.]net/.*) https\1
       default_backend httpnodes
+    backend wertube1935
+      mode tcp
+      balance leastconn
+      server s1 peertube-werts.peertube-werts.svc:1935 check
     backend httpnodes
       option forwardfor
-      server s1 istio-ingressgateway.istio-system.svc.cluster.local:80 check
+      #server s1 istio-ingressgateway.istio-system.svc.cluster.local:443 check ssl verify none
+      #server s2 172.16.17.5:443 check ssl verify none
+      # USE THE FRONTING PROXY PORT IN HAPROXY-INGRESS
+      server s1 172.16.17.82:81 check
     frontend stats
       mode http
@ -64,25 +79,27 @@ data:
       stats refresh 10s
       stats admin if LOCALHOST
 ---
+# This is a daemonset so that we can use local traffic policies.
+# The whole point of this pod is to gather and preserve client IPs
+# so local traffic policies are a must (kube-proxy will change the
+# origin IP).
 apiVersion: apps/v1
-kind: Deployment
+kind: DaemonSet
 metadata:
   name: "haproxy-server"
   namespace: "ingress-shim"
   annotations:
     "reloader.stakater.com/auto": "true"
 spec:
-  replicas: 1
   selector:
     matchLabels:
       app: "haproxy-server"
-  strategy:
-    type: RollingUpdate
   template:
     metadata:
       labels:
         app: "haproxy-server"
     spec:
+      terminationGracePeriodSeconds: 0
       initContainers:
       - name: combine-certs
         command: ["bash", "-c"]
@ -113,6 +130,9 @@ spec:
         - containerPort: 443
           name: https
           protocol: TCP
+        - containerPort: 1935
+          name: rtmp
+          protocol: TCP
        - containerPort: 4443
          name: proxys
          protocol: TCP
@ -143,6 +163,7 @@ metadata:
     metallb.universe.tf/loadBalancerIPs: 172.16.17.80
 spec:
   allocateLoadBalancerNodePorts: true
+  # PRESERVE CLIENT IPS! THIS IS THE WHOLE POINT!
   externalTrafficPolicy: Local
   internalTrafficPolicy: Local
   ipFamilies:
@ -153,6 +174,10 @@ spec:
     port: 80
     protocol: TCP
     targetPort: http
+  - name: rtmp-1935
+    port: 1935
+    protocol: TCP
+    targetPort: rtmp
   - name: https-443
     port: 443
     protocol: TCP


@ -0,0 +1,19 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: wildcard-tls
namespace: ingress-shim
spec:
secretName: wildcard-tls
issuerRef:
name: zerossl
kind: ClusterIssuer
dnsNames:
- strudelline.net
- '*.strudelline.net'
- '*.notes.werts.us'
- '*.minio.strudelline.net'
- werts.us
- '*.werts.us'
- kn8v.com
- '*.kn8v.com'

jellyfin/deployment.yaml Normal file

@ -0,0 +1,91 @@
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: jellyfin
namespace: jellyfin
labels:
app: jellyfin
spec:
replicas: 1
selector:
matchLabels:
app: jellyfin
template:
metadata:
annotations:
k8s.v1.cni.cncf.io/networks: |
[{
"namespace": "cascade",
"name": "br0-static",
"ips": ["172.16.1.77/12"]
}]
labels:
app: jellyfin
spec:
containers:
- name: jellyfin
image: jellyfin/jellyfin:latest
imagePullPolicy: Always
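        # 8096/8920 are Jellyfin's standard HTTP/HTTPS ports; 1900/udp (DLNA/
        # SSDP) and 7359/udp (auto-discovery) let clients find the server.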
ports:
- containerPort: 8096
name: http
- containerPort: 8920
name: https
- containerPort: 1900
name: discovery1
protocol: UDP
- containerPort: 7359
name: discovery2
protocol: UDP
volumeMounts:
- name: jellyfin-data
mountPath: /config
subPath: config
- name: jellyfin-data
mountPath: /cache
subPath: cache
- name: dropbox
mountPath: /volume1/dropbox
- name: tv-shows
mountPath: /volume1/tv shows
- name: video
mountPath: /volume1/video
- name: movies
mountPath: /volume1/movies
resources:
requests:
cpu: 500m
memory: 2Gi
volumes:
- name: jellyfin-data
persistentVolumeClaim:
claimName: jellyfin-data
- name: dropbox
nfs:
server: 172.16.18.1
path: /volume1/dropbox
- name: tv-shows
nfs:
server: 172.16.18.1
path: /volume1/tv shows
- name: video
nfs:
server: 172.16.18.1
path: /volume1/video
- name: movies
nfs:
server: 172.16.18.1
path: /volume1/movies
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jellyfin-data
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 40Gi

jellyfin/ingress.yaml Normal file

@ -0,0 +1,21 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jellyfin
namespace: jellyfin
labels:
app: jellyfin
spec:
ingressClassName: haproxy
rules:
- host: jellyfin.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jellyfin
port:
name: http

jellyfin/ns.yaml Normal file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: jellyfin

jellyfin/service.yaml Normal file

@ -0,0 +1,14 @@
---
kind: Service
apiVersion: v1
metadata:
name: jellyfin
namespace: jellyfin
spec:
selector:
app: jellyfin
ports:
- protocol: TCP
port: 80
name: http
targetPort: 8096

jenkins/0ns.yaml Normal file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: jenkins

jenkins/deploy.sh Normal file

@ -0,0 +1,3 @@
#!/bin/bash
#
helm upgrade -i --create-namespace -n jenkins jenkins jenkins/jenkins -f values.yaml

jenkins/diff.sh Normal file

@ -0,0 +1,3 @@
#!/bin/bash
#
helm diff upgrade -n jenkins jenkins jenkins/jenkins -f values.yaml

jenkins/ingress.yaml Normal file

@ -0,0 +1,19 @@
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: jenkins
namespace: jenkins
spec:
ingressClassName: haproxy
rules:
- host: jenkins.strudelline.net
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: jenkins
port:
name: http

jenkins/pvc.yaml Normal file

@ -0,0 +1,13 @@
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: jenkins
namespace: jenkins
spec:
storageClassName: longhorn
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 8Gi

jenkins/values.yaml Normal file

@ -0,0 +1,4 @@
USER-SUPPLIED VALUES:
controller:
jenkinsUriPrefix: ""
jenkinsUrl: https://jenkins.strudelline.net

keycloak/debugger.yaml Normal file

@ -0,0 +1,101 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: debugger
namespace: keycloak
spec:
ingressClassName: haproxy
rules:
- host: debug.werts.us
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: debugger
port:
number: 9009
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: keycloak
name: debugger
spec:
replicas: 1
selector:
matchLabels:
app: debugger
template:
metadata:
labels:
app: debugger
spec:
containers:
- image: beryju/oidc-test-client:latest
name: debugger
env:
- name: OIDC_DO_REFRESH
value: "false"
- name: OIDC_DO_INTROSPECTION
value: "false"
- name: OIDC_CLIENT_ID
valueFrom:
secretKeyRef:
name: debugger-oidc-secret
key: id
- name: OIDC_CLIENT_SECRET
valueFrom:
secretKeyRef:
name: debugger-oidc-secret
key: secret
- name: OIDC_PROVIDER
value: https://auth.werts.us/realms/werts
- name: OIDC_ROOT_URL
value: https://debug.werts.us/
ports:
- containerPort: 9009
name: http
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: debugger
namespace: keycloak
spec:
ports:
- port: 9009
protocol: TCP
targetPort: 9009
selector:
app: debugger
type: ClusterIP
---
apiVersion: external-secrets.io/v1beta1
kind: ExternalSecret
metadata:
name: debugger-oidc-secret
namespace: keycloak
spec:
data:
- remoteRef:
key: oidc client - debugger
property: username
secretKey: id
- remoteRef:
key: oidc client - debugger
property: password
secretKey: secret
- remoteRef:
key: oidc client - debugger
property: discovery_url
secretKey: discovery_url
refreshInterval: 60s
secretStoreRef:
kind: ClusterSecretStore
name: bitwarden
target:
name: debugger-oidc-secret

keycloak/echoserver.yaml Normal file

@ -0,0 +1,80 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: echoserver
namespace: keycloak
annotations:
ingress.kubernetes.io/oauth: oauth2_proxy
ingress.kubernetes.io/auth-url: https://auth.werts.us/oauth2/auth
ingress.kubernetes.io/auth-signin: https://auth.werts.us/oauth2/start?rd=https://echo.werts.us
ingress.kubernetes.io/auth-signout-not-implemented: https://auth.werts.us/realms/werts/protocol/openid-connect/logout
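    # forward-auth: haproxy-ingress validates each request against
    # oauth2-proxy's /oauth2/auth endpoint and redirects unauthenticated
    # users to the auth-signin URL above.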
spec:
ingressClassName: haproxy
rules:
- host: echo.werts.us
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: echoserver
port:
number: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: echoserver-non-auth
namespace: keycloak
spec:
ingressClassName: haproxy
rules:
- host: echo.werts.us
http:
paths:
- path: /non-auth
pathType: Prefix
backend:
service:
name: echoserver
port:
number: 8080
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: keycloak
name: echoserver
spec:
replicas: 1
selector:
matchLabels:
app: echoserver
template:
metadata:
labels:
app: echoserver
spec:
containers:
- image: mendhak/http-https-echo:30
name: echoserver
ports:
        - containerPort: 8080
name: http
protocol: TCP
restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
name: echoserver
namespace: keycloak
spec:
ports:
- port: 8080
protocol: TCP
targetPort: 8080
selector:
app: echoserver
type: ClusterIP

Some files were not shown because too many files have changed in this diff