Initial Commit 🎉

Guilhem Lettron 2022-05-31 03:14:39 +02:00
commit 8f064a2890
66 changed files with 9488 additions and 0 deletions

12
.dockerignore Normal file
@@ -0,0 +1,12 @@
# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
# Ignore build and test binaries.
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
bin/
testbin/
Dockerfile
Makefile
*.md

25
.gitignore vendored Normal file
@@ -0,0 +1,25 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
bin
testbin/*
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Kubernetes Generated files - skip generated files, except for vendored files
!vendor/**/zz_generated.*
# editor and IDE paraphernalia
.idea
*.swp
*.swo
*~

25
Dockerfile Normal file
@@ -0,0 +1,25 @@
# Build the manager binary
FROM golang:1.18 as builder
WORKDIR /workspace
# Copy the Go Modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache deps before building and copying source so that we don't need to re-download as much
# and so that source changes don't invalidate our downloaded layer
RUN go mod download
# Copy the go source
COPY . .
# Build
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -ldflags="-s -w" -o manager main.go
# Use distroless as minimal base image to package the manager binary
# Refer to https://github.com/GoogleContainerTools/distroless for more details
FROM gcr.io/distroless/static:nonroot
WORKDIR /
COPY --from=builder /workspace/manager .
USER 65532:65532
ENTRYPOINT ["/manager"]

201
LICENSE Normal file
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

132
Makefile Normal file
@@ -0,0 +1,132 @@
# Image URL to use all building/pushing image targets
IMG ?= controller:latest
# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
ENVTEST_K8S_VERSION = 1.23
# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
ifeq (,$(shell go env GOBIN))
GOBIN=$(shell go env GOPATH)/bin
else
GOBIN=$(shell go env GOBIN)
endif
# Setting SHELL to bash allows bash commands to be executed by recipes.
# This is a requirement for 'setup-envtest.sh' in the test target.
# Options are set to exit when a recipe line exits non-zero or a piped command fails.
SHELL = /usr/bin/env bash -o pipefail
.SHELLFLAGS = -ec
.PHONY: all
all: build
##@ General
# The help target prints out all targets with their descriptions organized
# beneath their categories. The categories are represented by '##@' and the
# target descriptions by '##'. The awk command is responsible for reading the
# entire set of makefiles included in this invocation, looking for lines of the
# file as xyz: ## something, and then pretty-formatting the target and help. Then,
# if there's a line with ##@ something, that gets pretty-printed as a category.
# More info on the usage of ANSI control characters for terminal formatting:
# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
# More info on the awk command:
# http://linuxcommand.org/lc3_adv_awk.php
.PHONY: help
help: ## Display this help.
@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
##@ Development
.PHONY: manifests
manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
.PHONY: generate
generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
.PHONY: fmt
fmt: ## Run go fmt against code.
go fmt ./...
.PHONY: vet
vet: ## Run go vet against code.
go vet ./...
.PHONY: test
test: manifests generate fmt vet envtest ## Run tests.
KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out
##@ Build
.PHONY: build
build: generate fmt vet ## Build manager binary.
go build -ldflags="-s -w" -o bin/manager main.go
.PHONY: run
run: manifests generate fmt vet ## Run a controller from your host.
go run ./main.go
.PHONY: docker-build
docker-build: test ## Build docker image with the manager.
docker build --load -t ${IMG} .
.PHONY: docker-push
docker-push: ## Push docker image with the manager.
docker push ${IMG}
##@ Deployment
ifndef ignore-not-found
ignore-not-found = false
endif
.PHONY: install
install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
$(KUSTOMIZE) build config/crd | kubectl apply -f -
.PHONY: uninstall
uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
.PHONY: deploy
deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
$(KUSTOMIZE) build config/default | kubectl apply -f -
.PHONY: undeploy
undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
##@ Build Dependencies
## Location to install dependencies to
LOCALBIN ?= $(shell pwd)/bin
$(LOCALBIN):
mkdir -p $(LOCALBIN)
## Tool Binaries
KUSTOMIZE ?= $(LOCALBIN)/kustomize
CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
ENVTEST ?= $(LOCALBIN)/setup-envtest
## Tool Versions
KUSTOMIZE_VERSION ?= v4.5.5
CONTROLLER_TOOLS_VERSION ?= v0.9.0
.PHONY: kustomize
kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
$(KUSTOMIZE): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/kustomize/kustomize/v4@$(KUSTOMIZE_VERSION)
.PHONY: controller-gen
controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
$(CONTROLLER_GEN): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
.PHONY: envtest
envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
$(ENVTEST): $(LOCALBIN)
GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest

34
PROJECT Normal file
@@ -0,0 +1,34 @@
domain: barpilot.io
layout:
- go.kubebuilder.io/v3
projectName: headscale-operator
repo: github.com/guilhem/headscale-operator
resources:
- api:
crdVersion: v1
namespaced: true
controller: true
domain: barpilot.io
group: headscale
kind: Server
path: github.com/guilhem/headscale-operator/api/v1alpha1
version: v1alpha1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: barpilot.io
group: headscale
kind: Namespace
path: github.com/guilhem/headscale-operator/api/v1alpha1
version: v1alpha1
- api:
crdVersion: v1
namespaced: true
controller: true
domain: barpilot.io
group: headscale
kind: PreAuthKey
path: github.com/guilhem/headscale-operator/api/v1alpha1
version: v1alpha1
version: "3"

101
README.md Normal file
@@ -0,0 +1,101 @@
# headscale-operator
A Kubernetes Operator to instantiate and control headscale instances.
## Description
With _headscale-operator_ you can instantiate multiple [headscale](https://github.com/juanfont/headscale) servers for multiple purposes.
_headscale-operator_ lets you manage [_namespaces_](https://github.com/juanfont/headscale/blob/main/docs/glossary.md) and _preauthkeys_.
## Getting Started
You'll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).
### Running on the cluster
1. Install Instances of Custom Resources:
```sh
kubectl apply -f config/samples/
```
1. Build and push your image to the location specified by `IMG`:
```sh
make docker-build docker-push IMG=<some-registry>/headscale-operator:tag
```
1. Deploy the controller to the cluster with the image specified by `IMG`:
```sh
make deploy IMG=<some-registry>/headscale-operator:tag
```
### Uninstall CRDs
To delete the CRDs from the cluster:
```sh
make uninstall
```
### Undeploy controller
Undeploy the controller from the cluster:
```sh
make undeploy
```
### How it works
This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/).
It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/),
which provide a reconcile function responsible for synchronizing resources until the desired state is reached on the cluster.
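To make the pattern concrete, here is a minimal reconciler sketch (illustration only, not part of this commit); it assumes controller-runtime, and `WidgetReconciler` is a hypothetical placeholder for a real reconciler such as the ones under `controllers/`:
```go
package controllers

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// WidgetReconciler is a hypothetical reconciler used only to illustrate the loop.
type WidgetReconciler struct {
	client.Client
}

// Reconcile is called for every event on a watched object. It reads the current
// state, compares it with the desired state in the object's spec, and creates,
// updates, or deletes dependent resources until they match.
func (r *WidgetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	// 1. Fetch the object identified by req.NamespacedName with r.Get(ctx, ...).
	// 2. Build or update the resources it needs (Deployments, Services, ...).
	// 3. Update the object's status to reflect what was observed.
	// Returning an error (or ctrl.Result{RequeueAfter: ...}) schedules another pass.
	return ctrl.Result{}, nil
}
```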
### Test It Out
1. Install the CRDs into the cluster:
```sh
make install
```
1. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running):
```sh
make run
```
**NOTE:** You can also run this in one step by running: `make install run`
### Modifying the API definitions
If you are editing the API definitions, generate the manifests such as CRs or CRDs using:
```sh
make manifests
```
**NOTE:** Run `make --help` for more information on all potential `make` targets
More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html)
## License
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@@ -0,0 +1,36 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1alpha1 contains API Schema definitions for the headscale v1alpha1 API group
//+kubebuilder:object:generate=true
//+groupName=headscale.barpilot.io
package v1alpha1
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"sigs.k8s.io/controller-runtime/pkg/scheme"
)
var (
// GroupVersion is group version used to register these objects
GroupVersion = schema.GroupVersion{Group: "headscale.barpilot.io", Version: "v1alpha1"}
// SchemeBuilder is used to add go types to the GroupVersionKind scheme
SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
// AddToScheme adds the types in this group-version to the given scheme.
AddToScheme = SchemeBuilder.AddToScheme
)
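For context (a sketch only, not part of this commit): in a kubebuilder project these definitions are consumed by `main.go`, which registers the group-version types on the manager's scheme before starting the controllers. The wiring typically looks like the following, assuming controller-runtime and client-go:
```go
package main

import (
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"

	headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// Core Kubernetes types plus the headscale.barpilot.io/v1alpha1 types above.
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(headscalev1alpha1.AddToScheme(scheme))

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{Scheme: scheme})
	if err != nil {
		panic(err)
	}

	// Controller setup would happen here before blocking on mgr.Start.
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```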

@@ -0,0 +1,56 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// NamespaceSpec defines the desired state of Namespace
type NamespaceSpec struct {
Server string `json:"server"`
}
// NamespaceStatus defines the observed state of Namespace
type NamespaceStatus struct {
Created bool `json:"created"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// Namespace is the Schema for the namespaces API
type Namespace struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec NamespaceSpec `json:"spec,omitempty"`
Status NamespaceStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// NamespaceList contains a list of Namespace
type NamespaceList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Namespace `json:"items"`
}
func init() {
SchemeBuilder.Register(&Namespace{}, &NamespaceList{})
}

@@ -0,0 +1,63 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// PreAuthKeySpec defines the desired state of PreAuthKey
type PreAuthKeySpec struct {
Namespace string `json:"namespace"`
Reusable bool `json:"reusable"`
Ephemeral bool `json:"ephemeral"`
Duration string `json:"duration"`
}
// PreAuthKeyStatus defines the observed state of PreAuthKey
type PreAuthKeyStatus struct {
Used bool `json:"used"`
ID string `json:"id"`
Expiration string `json:"expiration"`
CreatedAt string `json:"createdAt"`
Key string `json:"key"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// PreAuthKey is the Schema for the preauthkeys API
type PreAuthKey struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PreAuthKeySpec `json:"spec,omitempty"`
Status PreAuthKeyStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// PreAuthKeyList contains a list of PreAuthKey
type PreAuthKeyList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []PreAuthKey `json:"items"`
}
func init() {
SchemeBuilder.Register(&PreAuthKey{}, &PreAuthKeyList{})
}

@@ -0,0 +1,85 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
"github.com/guilhem/headscale-operator/pkg/headscale"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ServerSpec defines the desired state of Server
type ServerSpec struct {
//+optional
Version string `json:"version"`
//+optional
//+kubebuilder:default=false
Debug bool `json:"debug"`
//+optional
Issuer string `json:"issuer,omitempty"`
//+optional
GrpcServiceName string `json:"grpcServiceName,omitempty"`
// +kubebuilder:validation:Schemaless
// +kubebuilder:pruning:PreserveUnknownFields
Config headscale.Config `json:"config,omitempty"`
// +kubebuilder:validation:Format=hostname
// +kubebuilder:validation:Required
Host string `json:"host,omitempty"`
// +optional
// +kubebuilder:validation:Schemaless
// +kubebuilder:pruning:PreserveUnknownFields
Ingress *networkingv1.Ingress `json:"ingress,omitempty"`
}
// ServerStatus defines the observed state of Server
type ServerStatus struct {
GrpcAddress string `json:"grpcAddress,omitempty"`
DeploymentName string `json:"deploymentName,omitempty"`
}
//+kubebuilder:object:root=true
//+kubebuilder:subresource:status
// Server is the Schema for the servers API
type Server struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec ServerSpec `json:"spec,omitempty"`
Status ServerStatus `json:"status,omitempty"`
}
//+kubebuilder:object:root=true
// ServerList contains a list of Server
type ServerList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Server `json:"items"`
}
func init() {
SchemeBuilder.Register(&Server{}, &ServerList{})
}

@@ -0,0 +1,300 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package v1alpha1
import (
"k8s.io/api/networking/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Namespace) DeepCopyInto(out *Namespace) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace.
func (in *Namespace) DeepCopy() *Namespace {
if in == nil {
return nil
}
out := new(Namespace)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Namespace) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceList) DeepCopyInto(out *NamespaceList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Namespace, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList.
func (in *NamespaceList) DeepCopy() *NamespaceList {
if in == nil {
return nil
}
out := new(NamespaceList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NamespaceList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec.
func (in *NamespaceSpec) DeepCopy() *NamespaceSpec {
if in == nil {
return nil
}
out := new(NamespaceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus.
func (in *NamespaceStatus) DeepCopy() *NamespaceStatus {
if in == nil {
return nil
}
out := new(NamespaceStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreAuthKey) DeepCopyInto(out *PreAuthKey) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreAuthKey.
func (in *PreAuthKey) DeepCopy() *PreAuthKey {
if in == nil {
return nil
}
out := new(PreAuthKey)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PreAuthKey) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreAuthKeyList) DeepCopyInto(out *PreAuthKeyList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PreAuthKey, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreAuthKeyList.
func (in *PreAuthKeyList) DeepCopy() *PreAuthKeyList {
if in == nil {
return nil
}
out := new(PreAuthKeyList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PreAuthKeyList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreAuthKeySpec) DeepCopyInto(out *PreAuthKeySpec) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreAuthKeySpec.
func (in *PreAuthKeySpec) DeepCopy() *PreAuthKeySpec {
if in == nil {
return nil
}
out := new(PreAuthKeySpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PreAuthKeyStatus) DeepCopyInto(out *PreAuthKeyStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreAuthKeyStatus.
func (in *PreAuthKeyStatus) DeepCopy() *PreAuthKeyStatus {
if in == nil {
return nil
}
out := new(PreAuthKeyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Server) DeepCopyInto(out *Server) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server.
func (in *Server) DeepCopy() *Server {
if in == nil {
return nil
}
out := new(Server)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Server) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerList) DeepCopyInto(out *ServerList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Server, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList.
func (in *ServerList) DeepCopy() *ServerList {
if in == nil {
return nil
}
out := new(ServerList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServerList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerSpec) DeepCopyInto(out *ServerSpec) {
*out = *in
in.Config.DeepCopyInto(&out.Config)
if in.Ingress != nil {
in, out := &in.Ingress, &out.Ingress
*out = new(v1.Ingress)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec.
func (in *ServerSpec) DeepCopy() *ServerSpec {
if in == nil {
return nil
}
out := new(ServerSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerStatus) DeepCopyInto(out *ServerStatus) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus.
func (in *ServerStatus) DeepCopy() *ServerStatus {
if in == nil {
return nil
}
out := new(ServerStatus)
in.DeepCopyInto(out)
return out
}

@@ -0,0 +1,55 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: namespaces.headscale.barpilot.io
spec:
group: headscale.barpilot.io
names:
kind: Namespace
listKind: NamespaceList
plural: namespaces
singular: namespace
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Namespace is the Schema for the namespaces API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: NamespaceSpec defines the desired state of Namespace
properties:
server:
type: string
required:
- server
type: object
status:
description: NamespaceStatus defines the observed state of Namespace
properties:
created:
type: boolean
required:
- created
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,76 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: preauthkeys.headscale.barpilot.io
spec:
group: headscale.barpilot.io
names:
kind: PreAuthKey
listKind: PreAuthKeyList
plural: preauthkeys
singular: preauthkey
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: PreAuthKey is the Schema for the preauthkeys API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: PreAuthKeySpec defines the desired state of PreAuthKey
properties:
duration:
type: string
ephemeral:
type: boolean
namespace:
type: string
reusable:
type: boolean
required:
- duration
- ephemeral
- namespace
- reusable
type: object
status:
description: PreAuthKeyStatus defines the observed state of PreAuthKey
properties:
createdAt:
type: string
expiration:
type: string
id:
type: string
key:
type: string
used:
type: boolean
required:
- createdAt
- expiration
- id
- key
- used
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,67 @@
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.9.0
creationTimestamp: null
name: servers.headscale.barpilot.io
spec:
group: headscale.barpilot.io
names:
kind: Server
listKind: ServerList
plural: servers
singular: server
scope: Namespaced
versions:
- name: v1alpha1
schema:
openAPIV3Schema:
description: Server is the Schema for the servers API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: ServerSpec defines the desired state of Server
properties:
config:
x-kubernetes-preserve-unknown-fields: true
debug:
default: false
type: boolean
grpcServiceName:
type: string
host:
format: hostname
type: string
ingress:
x-kubernetes-preserve-unknown-fields: true
issuer:
type: string
version:
type: string
type: object
status:
description: ServerStatus defines the observed state of Server
properties:
deploymentName:
type: string
grpcAddress:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}

@@ -0,0 +1,27 @@
# This kustomization.yaml is not intended to be run by itself,
# since it depends on service name and namespace that are out of this kustomize package.
# It should be run by config/default
resources:
- bases/headscale.barpilot.io_servers.yaml
- bases/headscale.barpilot.io_namespaces.yaml
- bases/headscale.barpilot.io_preauthkeys.yaml
#+kubebuilder:scaffold:crdkustomizeresource
patchesStrategicMerge:
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
# patches here are for enabling the conversion webhook for each CRD
#- patches/webhook_in_servers.yaml
#- patches/webhook_in_namespaces.yaml
#- patches/webhook_in_preauthkeys.yaml
#+kubebuilder:scaffold:crdkustomizewebhookpatch
# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
# patches here are for enabling the CA injection for each CRD
#- patches/cainjection_in_servers.yaml
#- patches/cainjection_in_namespaces.yaml
#- patches/cainjection_in_preauthkeys.yaml
#+kubebuilder:scaffold:crdkustomizecainjectionpatch
# the following config is for teaching kustomize how to do kustomization for CRDs.
configurations:
- kustomizeconfig.yaml

@@ -0,0 +1,19 @@
# This file is for teaching kustomize how to substitute name and namespace reference in CRD
nameReference:
- kind: Service
version: v1
fieldSpecs:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/name
namespace:
- kind: CustomResourceDefinition
version: v1
group: apiextensions.k8s.io
path: spec/conversion/webhook/clientConfig/service/namespace
create: false
varReference:
- path: metadata/annotations

@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: namespaces.headscale.barpilot.io

@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: preauthkeys.headscale.barpilot.io

@@ -0,0 +1,7 @@
# The following patch adds a directive for certmanager to inject CA into the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
annotations:
cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
name: servers.headscale.barpilot.io

@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: namespaces.headscale.barpilot.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: preauthkeys.headscale.barpilot.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

@@ -0,0 +1,16 @@
# The following patch enables a conversion webhook for the CRD
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: servers.headscale.barpilot.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
service:
namespace: system
name: webhook-service
path: /convert
conversionReviewVersions:
- v1

@@ -0,0 +1,74 @@
# Adds namespace to all resources.
namespace: headscale-operator-system
# Value of this field is prepended to the
# names of all resources, e.g. a deployment named
# "wordpress" becomes "alices-wordpress".
# Note that it should also match with the prefix (text before '-') of the namespace
# field above.
namePrefix: headscale-operator-
# Labels to add to all resources and selectors.
#commonLabels:
# someName: someValue
bases:
- ../crd
- ../rbac
- ../manager
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- ../webhook
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
#- ../certmanager
# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
#- ../prometheus
patchesStrategicMerge:
# Protect the /metrics endpoint by putting it behind auth.
# If you want your controller-manager to expose the /metrics
# endpoint w/o any authn/z, please comment the following line.
- manager_auth_proxy_patch.yaml
# Mount the controller config file for loading manager configurations
# through a ComponentConfig type
#- manager_config_patch.yaml
# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
# crd/kustomization.yaml
#- manager_webhook_patch.yaml
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
# 'CERTMANAGER' needs to be enabled to use ca injection
#- webhookcainjection_patch.yaml
# the following config is for teaching kustomize how to do var substitution
vars:
# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
# fieldref:
# fieldpath: metadata.namespace
#- name: CERTIFICATE_NAME
# objref:
# kind: Certificate
# group: cert-manager.io
# version: v1
# name: serving-cert # this name should match the one in certificate.yaml
#- name: SERVICE_NAMESPACE # namespace of the service
# objref:
# kind: Service
# version: v1
# name: webhook-service
# fieldref:
# fieldpath: metadata.namespace
#- name: SERVICE_NAME
# objref:
# kind: Service
# version: v1
# name: webhook-service

@@ -0,0 +1,34 @@
# This patch injects a sidecar container which is an HTTP proxy for the
# controller manager. It performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: kube-rbac-proxy
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.11.0
args:
- "--secure-listen-address=0.0.0.0:8443"
- "--upstream=http://127.0.0.1:8080/"
- "--logtostderr=true"
- "--v=0"
ports:
- containerPort: 8443
protocol: TCP
name: https
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 5m
memory: 64Mi
- name: manager
args:
- "--health-probe-bind-address=:8081"
- "--metrics-bind-address=127.0.0.1:8080"
- "--leader-elect"

@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
spec:
template:
spec:
containers:
- name: manager
args:
- "--config=controller_manager_config.yaml"
volumeMounts:
- name: manager-config
mountPath: /controller_manager_config.yaml
subPath: controller_manager_config.yaml
volumes:
- name: manager-config
configMap:
name: manager-config

@@ -0,0 +1,11 @@
apiVersion: controller-runtime.sigs.k8s.io/v1alpha1
kind: ControllerManagerConfig
health:
healthProbeBindAddress: :8081
metrics:
bindAddress: 127.0.0.1:8080
webhook:
port: 9443
leaderElection:
leaderElect: true
resourceName: 67c702f3.barpilot.io

@@ -0,0 +1,16 @@
resources:
- manager.yaml
generatorOptions:
disableNameSuffixHash: true
configMapGenerator:
- files:
- controller_manager_config.yaml
name: manager-config
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
newName: europe-west1-docker.pkg.dev/themecloud-dev/test/headscale-operator
newTag: test1

@@ -0,0 +1,61 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: system
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: controller-manager
namespace: system
labels:
control-plane: controller-manager
spec:
selector:
matchLabels:
control-plane: controller-manager
replicas: 1
template:
metadata:
annotations:
kubectl.kubernetes.io/default-container: manager
labels:
control-plane: controller-manager
spec:
securityContext:
runAsNonRoot: true
containers:
- command:
- /manager
args:
- --leader-elect
image: controller:latest
imagePullPolicy: Always
name: manager
securityContext:
allowPrivilegeEscalation: false
livenessProbe:
httpGet:
path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
readinessProbe:
httpGet:
path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
# TODO(user): Configure the resources accordingly based on the project requirements.
# More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
resources:
limits:
cpu: 500m
memory: 128Mi
requests:
cpu: 10m
memory: 64Mi
serviceAccountName: controller-manager
terminationGracePeriodSeconds: 10

@@ -0,0 +1,2 @@
resources:
- monitor.yaml

@@ -0,0 +1,20 @@
# Prometheus Monitor Service (Metrics)
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-monitor
namespace: system
spec:
endpoints:
- path: /metrics
port: https
scheme: https
bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
tlsConfig:
insecureSkipVerify: true
selector:
matchLabels:
control-plane: controller-manager

@@ -0,0 +1,9 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: metrics-reader
rules:
- nonResourceURLs:
- "/metrics"
verbs:
- get

@@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: proxy-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: controller-manager-metrics-service
namespace: system
spec:
ports:
- name: https
port: 8443
protocol: TCP
targetPort: https
selector:
control-plane: controller-manager

@@ -0,0 +1,18 @@
resources:
# All RBAC will be applied under this service account in
# the deployment namespace. You may comment out this resource
# if your manager will use a service account that exists at
# runtime. Be sure to update RoleBinding and ClusterRoleBinding
# subjects if changing service account names.
- service_account.yaml
- role.yaml
- role_binding.yaml
- leader_election_role.yaml
- leader_election_role_binding.yaml
# Comment the following 4 lines if you want to disable
# the auth proxy (https://github.com/brancz/kube-rbac-proxy)
# which protects your /metrics endpoint.
- auth_proxy_service.yaml
- auth_proxy_role.yaml
- auth_proxy_role_binding.yaml
- auth_proxy_client_clusterrole.yaml

@@ -0,0 +1,37 @@
# permissions to do leader election.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: leader-election-role
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: leader-election-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: leader-election-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

@@ -0,0 +1,24 @@
# permissions for end users to edit namespaces.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: namespace-editor-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces/status
verbs:
- get

@@ -0,0 +1,20 @@
# permissions for end users to view namespaces.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: namespace-viewer-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces/status
verbs:
- get

@@ -0,0 +1,24 @@
# permissions for end users to edit preauthkeys.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: preauthkey-editor-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys/status
verbs:
- get

@@ -0,0 +1,20 @@
# permissions for end users to view preauthkeys.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: preauthkey-viewer-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys
verbs:
- get
- list
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys/status
verbs:
- get

145
config/rbac/role.yaml Normal file
@@ -0,0 +1,145 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: manager-role
rules:
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- cert-manager.io
resources:
- certificates
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- ""
resources:
- services
verbs:
- create
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces/finalizers
verbs:
- update
- apiGroups:
- headscale.barpilot.io
resources:
- namespaces/status
verbs:
- get
- patch
- update
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys/finalizers
verbs:
- update
- apiGroups:
- headscale.barpilot.io
resources:
- preauthkeys/status
verbs:
- get
- patch
- update
- apiGroups:
- headscale.barpilot.io
resources:
- servers
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- servers/finalizers
verbs:
- update
- apiGroups:
- headscale.barpilot.io
resources:
- servers/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.k8s.io
resources:
- ingresses
verbs:
- create
- get
- list
- patch
- update
- watch

@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: manager-role
subjects:
- kind: ServiceAccount
name: controller-manager
namespace: system

@@ -0,0 +1,24 @@
# permissions for end users to edit servers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: server-editor-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- servers
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- servers/status
verbs:
- get

@@ -0,0 +1,20 @@
# permissions for end users to view servers.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: server-viewer-role
rules:
- apiGroups:
- headscale.barpilot.io
resources:
- servers
verbs:
- get
- list
- watch
- apiGroups:
- headscale.barpilot.io
resources:
- servers/status
verbs:
- get

View File

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: controller-manager
namespace: system

View File

@ -0,0 +1,6 @@
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
name: selfsigned
spec:
selfSigned: {}

View File

@ -0,0 +1,6 @@
apiVersion: headscale.barpilot.io/v1alpha1
kind: Namespace
metadata:
name: namespace-sample
spec:
server: server-sample

View File

@ -0,0 +1,9 @@
apiVersion: headscale.barpilot.io/v1alpha1
kind: PreAuthKey
metadata:
name: preauthkey-sample
spec:
namespace: namespace-sample
reusable: true
ephemeral: true
duration: 1h

View File

@ -0,0 +1,7 @@
apiVersion: headscale.barpilot.io/v1alpha1
kind: Server
metadata:
name: server-sample
spec:
version: 0.15.0
issuer: selfsigned

38
controllers/default.go Normal file
View File

@ -0,0 +1,38 @@
package controllers
import (
"time"
"github.com/guilhem/headscale-operator/pkg/headscale"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
)
var defaultServerConfig = headscale.Config{
Addr: "0.0.0.0:8080",
MetricsAddr: "0.0.0.0:8081",
// GRPCAddr: "0.0.0.0:8081",
DERP: headscale.DERPConfig{
Server: headscale.DERPConfigServer{
Enabled: pointer.Bool(false),
RegionID: 999,
RegionCode: "headscale",
RegionName: "Headscale Embedded DERP",
STUNAddr: "0.0.0.0:3478",
},
URLs: []string{"https://controlplane.tailscale.com/derpmap/default"},
AutoUpdate: pointer.Bool(true),
Paths: []string{},
UpdateFrequency: metav1.Duration{Duration: time.Hour * 1},
},
EphemeralNodeInactivityTimeout: metav1.Duration{Duration: time.Hour * 24},
// ACMEURL: "https://acme-v02.api.letsencrypt.org/directory",
// ACMEEmail: "",
DNSConfig: headscale.DNSConfig{
Nameservers: []string{"1.1.1.1"},
Magic: pointer.Bool(true),
Domains: []string{},
BaseDomain: "",
},
LogLevel: "info",
}
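These values are only fallbacks: the Server reconciler merges defaultServerConfig underneath the user's Spec.Config with mergo.Merge, which fills in only fields still at their zero value. A minimal, self-contained sketch of that behaviour, using stand-in types rather than the real headscale.Config:

package main

import (
    "fmt"

    "github.com/imdario/mergo"
)

// cfg is a tiny stand-in for headscale.Config.
type cfg struct {
    Addr     string
    LogLevel string
}

func main() {
    user := cfg{LogLevel: "debug"}                          // what a Server spec might set
    defaults := cfg{Addr: "0.0.0.0:8080", LogLevel: "info"} // the equivalent of defaultServerConfig

    // Without mergo.WithOverride, Merge only writes into zero-valued fields
    // of user, so the user's LogLevel survives and Addr gets the default.
    if err := mergo.Merge(&user, defaults); err != nil {
        panic(err)
    }
    fmt.Println(user.Addr, user.LogLevel) // 0.0.0.0:8080 debug
}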

View File

@ -0,0 +1,159 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/pkg/utils"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)
// NamespaceReconciler reconciles a Namespace object
type NamespaceReconciler struct {
client.Client
Scheme *runtime.Scheme
}
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=namespaces,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=namespaces/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=namespaces/finalizers,verbs=update
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Namespace object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile
func (r *NamespaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
instance := new(headscalev1alpha1.Namespace)
if err := r.Get(ctx, req.NamespacedName, instance); err != nil {
log.Error(err, "unable to fetch Namespace")
// we'll ignore not-found errors, since they can't be fixed by an immediate
// requeue (we'll need to wait for a new notification), and we can get them
// on deleted requests.
return ctrl.Result{}, client.IgnoreNotFound(err)
}
server := &headscalev1alpha1.Server{
ObjectMeta: metav1.ObjectMeta{
Name: instance.Spec.Server,
Namespace: instance.Namespace,
},
}
if err := r.Get(ctx, client.ObjectKeyFromObject(server), server); err != nil {
log.Error(err, "unable to fetch Server")
return ctrl.Result{}, err
}
if server.Status.GrpcAddress == "" {
err := errors.New("Server not ready")
log.Error(err, "GrpcAddress empty")
return ctrl.Result{}, err
}
clientCtx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
client, err := utils.NewHeadscaleServiceClient(clientCtx, server.Status.GrpcAddress)
if err != nil {
return ctrl.Result{}, err
}
log.Info("Headscale Client created", "GrpcAddress", server.Status.GrpcAddress)
// examine DeletionTimestamp to determine if object is under deletion
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object. This is equivalent to
// registering our finalizer.
if !controllerutil.ContainsFinalizer(instance, Finalizer) {
controllerutil.AddFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(instance, Finalizer) {
if _, err := client.DeleteNamespace(ctx, &v1.DeleteNamespaceRequest{Name: instance.Name}); utils.IgnoreNotFound(err) != nil {
return ctrl.Result{}, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
// Stop reconciliation as the item is being deleted
return ctrl.Result{}, nil
}
logNamespace := log.WithValues("namespace", instance.Name)
if _, err := client.GetNamespace(ctx, &v1.GetNamespaceRequest{Name: instance.Name}); err != nil {
if utils.IgnoreNotFound(err) != nil {
logNamespace.Error(err, "Can't get Namespace")
return ctrl.Result{}, err
}
if _, err := client.CreateNamespace(ctx, &v1.CreateNamespaceRequest{Name: instance.Name}); err != nil {
logNamespace.Error(err, "can't create Namespace")
return ctrl.Result{}, err
}
logNamespace.Info("Namespace Created")
} else {
logNamespace.Info("Namespace Already Exists")
}
instance.Status.Created = true
if err := r.Status().Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
// reconcile every 30s
return ctrl.Result{RequeueAfter: time.Second * 30}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *NamespaceReconciler) SetupWithManager(mgr ctrl.Manager) error {
return ctrl.NewControllerManagedBy(mgr).
For(&headscalev1alpha1.Namespace{}).
Complete(r)
}

View File

@ -0,0 +1,201 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"errors"
"fmt"
"time"
"golang.org/x/exp/slices"
"google.golang.org/protobuf/types/known/timestamppb"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/pkg/utils"
v1 "github.com/juanfont/headscale/gen/go/headscale/v1"
)
// PreAuthKeyReconciler reconciles a PreAuthKey object
type PreAuthKeyReconciler struct {
client.Client
Scheme *runtime.Scheme
recorder record.EventRecorder
}
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=preauthkeys,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=preauthkeys/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=headscale.barpilot.io,resources=preauthkeys/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile
func (r *PreAuthKeyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
log.Info("Begin")
instance := new(headscalev1alpha1.PreAuthKey)
if err := r.Get(ctx, req.NamespacedName, instance); err != nil {
log.Error(err, "unable to fetch PreAuthKey")
// we'll ignore not-found errors, since they can't be fixed by an immediate
// requeue (we'll need to wait for a new notification), and we can get them
// on deleted requests.
return ctrl.Result{}, client.IgnoreNotFound(err)
}
namespace := &headscalev1alpha1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: instance.Spec.Namespace,
Namespace: instance.Namespace,
},
}
if err := r.Get(ctx, client.ObjectKeyFromObject(namespace), namespace); err != nil {
log.Error(err, "unable to fetch Namespace")
return ctrl.Result{}, err
}
if !namespace.Status.Created {
return ctrl.Result{}, errors.New("Namespace not created")
}
server := &headscalev1alpha1.Server{
ObjectMeta: metav1.ObjectMeta{
Name: namespace.Spec.Server,
Namespace: instance.Namespace,
},
}
if err := r.Get(ctx, client.ObjectKeyFromObject(server), server); err != nil {
log.Error(err, "unable to fetch Server")
return ctrl.Result{}, err
}
if server.Status.GrpcAddress == "" {
log.Info("Server not ready")
return ctrl.Result{}, errors.New("Server not ready")
}
clientCtx, cancel := context.WithTimeout(ctx, time.Second*10)
defer cancel()
client, err := utils.NewHeadscaleServiceClient(clientCtx, server.Status.GrpcAddress)
if err != nil {
return ctrl.Result{}, err
}
// examine DeletionTimestamp to determine if object is under deletion
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object. This is equivalent to
// registering our finalizer.
if !controllerutil.ContainsFinalizer(instance, Finalizer) {
controllerutil.AddFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(instance, Finalizer) {
if _, err := client.ExpirePreAuthKey(ctx, &v1.ExpirePreAuthKeyRequest{Key: instance.Name, Namespace: namespace.Name}); utils.IgnoreNotFound(err) != nil {
return ctrl.Result{}, err
}
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
// Stop reconciliation as the item is being deleted
return ctrl.Result{}, nil
}
list, err := client.ListPreAuthKeys(ctx, &v1.ListPreAuthKeysRequest{Namespace: namespace.Name})
if err != nil {
return ctrl.Result{}, err
}
preAuthKey := new(v1.PreAuthKey)
if i := slices.IndexFunc(list.PreAuthKeys, func(pak *v1.PreAuthKey) bool { return pak.Namespace == namespace.Name }); i >= 0 {
// PreAuthKey already exists
preAuthKey = list.PreAuthKeys[i]
} else {
createPreAuthKeyRequest := &v1.CreatePreAuthKeyRequest{
Namespace: namespace.Name,
Reusable: instance.Spec.Reusable,
Ephemeral: instance.Spec.Ephemeral,
}
if d := instance.Spec.Duration; d != "" {
duration, err := time.ParseDuration(d)
if err != nil {
return ctrl.Result{}, err
}
createPreAuthKeyRequest.Expiration = timestamppb.New(time.Now().Add(duration))
}
createApiKeyResponse, err := client.CreatePreAuthKey(ctx, createPreAuthKeyRequest)
if err != nil {
return ctrl.Result{}, err
}
preAuthKey = createApiKeyResponse.PreAuthKey
r.recorder.Event(instance, "Normal", "Created", fmt.Sprintf("PreAuthKey %s created", preAuthKey.Key))
}
instance.Status.Used = preAuthKey.GetUsed()
instance.Status.ID = preAuthKey.GetId()
instance.Status.CreatedAt = preAuthKey.GetCreatedAt().AsTime().String()
instance.Status.Expiration = preAuthKey.GetExpiration().AsTime().String()
instance.Status.Key = preAuthKey.GetKey()
if err := r.Status().Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
// check every 30s
return ctrl.Result{RequeueAfter: time.Second * 30}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *PreAuthKeyReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.recorder = mgr.GetEventRecorderFor("preauthkey-controller")
return ctrl.NewControllerManagedBy(mgr).
For(&headscalev1alpha1.PreAuthKey{}).
Complete(r)
}
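One detail of the reconciler above worth isolating: spec.duration is a relative Go duration string (the sample uses "1h") and is only turned into an absolute expiration at the moment the key is created. A small standalone sketch of that conversion, using the same libraries as the controller:

package main

import (
    "fmt"
    "time"

    "google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
    // spec.duration as in the PreAuthKey sample
    d, err := time.ParseDuration("1h")
    if err != nil {
        panic(err)
    }

    // The reconciler sends this value as CreatePreAuthKeyRequest.Expiration.
    expiration := timestamppb.New(time.Now().Add(d))
    fmt.Println(expiration.AsTime()) // roughly one hour from now, in UTC
}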

View File

@ -0,0 +1,656 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"encoding/json"
"fmt"
"net"
"path"
"path/filepath"
"strconv"
"github.com/imdario/mergo"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/tools/record"
"k8s.io/utils/pointer"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/log"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/pkg/utils"
)
// ServerReconciler reconciles a Server object
type ServerReconciler struct {
client.Client
Scheme *runtime.Scheme
recorder record.EventRecorder
}
const Finalizer = "headscale.barpilot.io/finalizer"
const ConfigFileName = "config.json"
// +kubebuilder:rbac:groups=headscale.barpilot.io,resources=servers,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=headscale.barpilot.io,resources=servers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=headscale.barpilot.io,resources=servers/finalizers,verbs=update
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=cert-manager.io,resources=certificates,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch
// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Server object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.11.2/pkg/reconcile
func (r *ServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx)
instance := new(headscalev1alpha1.Server)
if err := r.Get(ctx, req.NamespacedName, instance); err != nil {
log.Error(err, "unable to fetch Server")
// we'll ignore not-found errors, since they can't be fixed by an immediate
// requeue (we'll need to wait for a new notification), and we can get them
// on deleted requests.
return ctrl.Result{}, client.IgnoreNotFound(err)
}
// examine DeletionTimestamp to determine if object is under deletion
if instance.ObjectMeta.DeletionTimestamp.IsZero() {
// The object is not being deleted, so if it does not have our finalizer,
// then let's add the finalizer and update the object. This is equivalent to
// registering our finalizer.
if !controllerutil.ContainsFinalizer(instance, Finalizer) {
controllerutil.AddFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
} else {
// The object is being deleted
if controllerutil.ContainsFinalizer(instance, Finalizer) {
// remove our finalizer from the list and update it.
controllerutil.RemoveFinalizer(instance, Finalizer)
if err := r.Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
// Stop reconciliation as the item is being deleted
return ctrl.Result{}, nil
}
config := instance.Spec.Config
log.Info("Config before default", "config", config)
// Default value
if err := mergo.Merge(&config, defaultServerConfig); err != nil {
return ctrl.Result{}, err
}
log.Info("Config after default", "config", config)
// _, grpcPort, err := utils.SliptHostPort(instance.Spec.Config.GRPCAddr)
// if err != nil {
// return ctrl.Result{}, err
// }
labels := labels.Set{
"app.kubernetes.io/name": "headscale",
"app.kubernetes.io/instance": instance.Name,
"app.kubernetes.io/managed-by": "headscale-operator",
"app.kubernetes.io/component": "server",
}
////////////////
// Service
////////////////
const grpcInsecurePort = 8082
service := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-grpc", instance.Name),
Namespace: instance.Namespace,
},
}
if op, err := controllerutil.CreateOrUpdate(ctx, r.Client, service, func() error {
if err := controllerutil.SetControllerReference(instance, service, r.Scheme); err != nil {
return err
}
service.ObjectMeta.Labels = labels
service.Spec.Type = corev1.ServiceTypeClusterIP
ports := []corev1.ServicePort{
{
Name: "server",
Protocol: corev1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromString("server"),
},
{
Name: "grpc-insecure",
Protocol: corev1.ProtocolTCP,
Port: int32(grpcInsecurePort),
TargetPort: intstr.FromString("grpc-insecure"),
},
}
if err := mergo.Merge(&service.Spec.Ports, ports, mergo.WithOverride); err != nil {
return err
}
service.Spec.Selector = labels
return nil
}); err != nil {
r.recorder.Event(instance, "Warning", "Failed", fmt.Sprintf("Fail to reconcile Service %s", service.Name))
log.Error(err, "Service reconcile failed")
} else {
switch op {
case controllerutil.OperationResultCreated:
r.recorder.Event(instance, "Normal", "Created", fmt.Sprintf("Created Sevice %s", service.Name))
case controllerutil.OperationResultUpdated:
r.recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("Updated Sevice %s", service.Name))
}
}
if service.Spec.ClusterIP != "" {
instance.Status.GrpcAddress = net.JoinHostPort(service.Spec.ClusterIP, strconv.Itoa(grpcInsecurePort))
if err := r.Status().Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
// ////////////////
// // Service LB
// ////////////////
// serviceLB := &corev1.Service{
// ObjectMeta: metav1.ObjectMeta{
// Name: fmt.Sprintf("%s-lb", instance.Name),
// Namespace: instance.Namespace,
// },
// }
// if op, err := controllerutil.CreateOrPatch(ctx, r.Client, serviceLB, func() error {
// if err := controllerutil.SetControllerReference(instance, serviceLB, r.Scheme); err != nil {
// return err
// }
// serviceLB.Spec.Type = corev1.ServiceTypeLoadBalancer
// ports := []corev1.ServicePort{
// {
// Name: "server",
// Protocol: corev1.ProtocolTCP,
// Port: 443,
// TargetPort: intstr.FromString("server"),
// },
// }
// if err := mergo.Merge(&serviceLB.Spec.Ports, ports); err != nil {
// return err
// }
// serviceLB.Spec.Selector = labels
// return nil
// }); err != nil {
// log.Error(err, "Service LB reconcile failed")
// } else {
// switch op {
// case controllerutil.OperationResultCreated:
// r.recorder.Event(instance, "Normal", "Created", fmt.Sprintf("Created Service %s", service.Name))
// case controllerutil.OperationResultUpdated:
// r.recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("Updated Service %s", service.Name))
// }
// }
deployment := &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: instance.Name,
Namespace: instance.Namespace,
},
}
// certificate := &certmanagerv1.Certificate{
// ObjectMeta: metav1.ObjectMeta{
// Name: instance.Name,
// Namespace: instance.Namespace,
// },
// }
// if op, err := controllerutil.CreateOrUpdate(ctx, r.Client, certificate, func() error {
// if err := controllerutil.SetControllerReference(instance, certificate, r.Scheme); err != nil {
// return err
// }
// certificate.Spec.SecretName = fmt.Sprintf("%s-tls", certificate.Name)
// certificate.Spec.IssuerRef = certmanagerv1metav1.ObjectReference{
// Name: instance.Spec.Issuer,
// }
// certificate.Spec.DNSNames = []string{"test.com"}
// ips := service.Spec.ClusterIPs
// ips = append(ips, serviceLB.Spec.ClusterIPs...)
// ips = append(ips, serviceLB.Spec.ExternalIPs...)
// certificate.Spec.IPAddresses = ips
// return nil
// }); err != nil {
// log.Error(err, "certificate reconcile failed")
// } else {
// if op != controllerutil.OperationResultNone {
// log.Info("certificate successfully reconciled", "operation", op)
// }
// }
// const keypath = "/run/headscale/certs"
// instance.Spec.Config.TLSCertPath = path.Join(keypath, "tls.crt")
// instance.Spec.Config.TLSKeyPath = path.Join(keypath, "tls.key")
const runSocketsPath = "/run/headscale/socket"
const socketName = "headscale.sock"
config.UnixSocket = path.Join(runSocketsPath, socketName)
config.UnixSocketPermission = "0770"
const dataPath = "/var/lib/headscale"
const sqlitename = "db.sqlite"
config.DBtype = "sqlite3"
config.DBpath = path.Join(dataPath, sqlitename)
config.PrivateKeyPath = filepath.Join(dataPath, "private.key")
config.Addr = "0.0.0.0:8080"
config.ServerURL = fmt.Sprintf("https://%s", instance.Spec.Host)
if instance.Spec.Debug {
config.LogLevel = "debug"
}
// config.GRPCAllowInsecure = pointer.Bool(true)
///////////////////////
// Configmap
///////////////////////
configmap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: instance.Name,
Namespace: instance.Namespace,
},
Data: make(map[string]string),
}
if op, err := controllerutil.CreateOrUpdate(ctx, r.Client, configmap, func() error {
if err := controllerutil.SetControllerReference(instance, configmap, r.Scheme); err != nil {
return err
}
configmap.ObjectMeta.Labels = labels
c, err := json.MarshalIndent(config, "", " ")
if err != nil {
return err
}
configmap.BinaryData = map[string][]byte{}
configmap.Data[ConfigFileName] = string(c)
return nil
}); err != nil {
r.recorder.Event(instance, "Warning", "Failed", fmt.Sprintf("Fail to reconcile Service %s", configmap.Name))
} else {
switch op {
case controllerutil.OperationResultCreated:
r.recorder.Event(instance, "Normal", "Created", fmt.Sprintf("Created Configmap %s", configmap.Name))
case controllerutil.OperationResultUpdated:
r.recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("Updated Configmap %s", configmap.Name))
}
}
////////////////
// Deployment
////////////////
if op, err := controllerutil.CreateOrUpdate(ctx, r.Client, deployment, func() error {
if err := controllerutil.SetControllerReference(instance, deployment, r.Scheme); err != nil {
return err
}
deployment.ObjectMeta.Labels = labels
version := instance.Spec.Version
if version == "" {
version = "latest"
}
_, listenPort, err := utils.SliptHostPort(config.Addr)
if err != nil {
return fmt.Errorf("can't parse config.Addr %w", err)
}
_, metricsPort, err := utils.SliptHostPort(config.MetricsAddr)
if err != nil {
return fmt.Errorf("can't parse config.MetricsAddr %w", err)
}
// _, grpcPort, err := utils.SliptHostPort(instance.Spec.Config.GRPCAddr)
// if err != nil {
// return err
// }
// immutable
if deployment.ObjectMeta.CreationTimestamp.IsZero() {
deployment.Spec.Selector = metav1.SetAsLabelSelector(labels)
}
deployment.Spec.Replicas = pointer.Int32(1)
podTemplate := corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"config-version": configmap.GetResourceVersion(),
// "certificate-version": certificate.GetResourceVersion(),
},
Labels: labels,
},
Spec: corev1.PodSpec{
// InitContainers: []corev1.Container{
// {
// Name: "init-litestream",
// Image: "litestream/litestream:0.3.8",
// Args: []string{"restore", "-if-db-not-exists", "-if-replica-exists", "-v", instance.Spec.Config.DBpath},
// VolumeMounts: []corev1.VolumeMount{
// {
// Name: "data",
// MountPath: sqlitedir,
// },
// },
// },
// },
Containers: []corev1.Container{
{
Name: "headscale",
Image: fmt.Sprintf("headscale/headscale:%s", version),
ImagePullPolicy: corev1.PullAlways,
Command: []string{"headscale", "serve"},
VolumeMounts: []corev1.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: "/etc/headscale/",
},
// {
// Name: "certificate",
// ReadOnly: true,
// MountPath: keypath,
// },
{
Name: "data",
MountPath: dataPath,
},
{
Name: "run",
MountPath: runSocketsPath,
},
},
Ports: []corev1.ContainerPort{
{
Name: "server",
ContainerPort: int32(listenPort),
},
{
Name: "metrics",
ContainerPort: int32(metricsPort),
},
// {
// Name: "grpc",
// ContainerPort: int32(grpcPort),
// },
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromString("server"),
Path: "/health",
Scheme: corev1.URISchemeHTTP,
},
},
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
HTTPGet: &corev1.HTTPGetAction{
Port: intstr.FromString("server"),
Path: "/health",
Scheme: corev1.URISchemeHTTP,
},
},
},
},
{
Name: "socat",
Image: "alpine/socat:1.7.4.3-r0",
Args: []string{
"tcp-listen:8082,fork,reuseaddr",
fmt.Sprintf("unix-connect:%s", path.Join(runSocketsPath, socketName)),
},
Ports: []corev1.ContainerPort{
{
Name: "grpc-insecure",
ContainerPort: int32(grpcInsecurePort),
},
},
VolumeMounts: []corev1.VolumeMount{
{
Name: "run",
ReadOnly: true,
MountPath: runSocketsPath,
},
},
LivenessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromString("grpc-insecure"),
},
},
},
ReadinessProbe: &corev1.Probe{
ProbeHandler: corev1.ProbeHandler{
TCPSocket: &corev1.TCPSocketAction{
Port: intstr.FromString("grpc-insecure"),
},
},
},
},
},
Volumes: []corev1.Volume{
{
Name: "config",
VolumeSource: corev1.VolumeSource{
ConfigMap: &corev1.ConfigMapVolumeSource{
LocalObjectReference: corev1.LocalObjectReference{
Name: configmap.Name,
},
},
},
},
// {
// Name: "certificate",
// VolumeSource: corev1.VolumeSource{
// Secret: &corev1.SecretVolumeSource{
// SecretName: certificate.Spec.SecretName,
// },
// },
// },
{
Name: "data",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
{
Name: "run",
VolumeSource: corev1.VolumeSource{
EmptyDir: &corev1.EmptyDirVolumeSource{},
},
},
},
},
}
if err := mergo.Merge(&deployment.Spec.Template, podTemplate, mergo.WithOverride); err != nil {
return err
}
return nil
}); err != nil {
log.Error(err, "Deployment reconcile failed")
} else {
if op != controllerutil.OperationResultNone {
log.Info("Deployment successfully reconciled", "operation", op)
}
instance.Status.DeploymentName = deployment.Name
if err := r.Status().Update(ctx, instance); err != nil {
return ctrl.Result{}, err
}
}
// ingress := instance.Spec.Ingress
// if ingress == nil {
// ingress = &networkingv1.Ingress{}
// }
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: instance.Name,
Namespace: instance.Namespace,
},
}
// ingress.SetName(instance.Name)
// ingress.SetNamespace(instance.Namespace)
// ingress.ObjectMeta.Name = instance.Name
// ingress.ObjectMeta.Namespace = instance.Namespace
if op, err := controllerutil.CreateOrUpdate(ctx, r.Client, ingress, func() error {
if err := controllerutil.SetControllerReference(instance, ingress, r.Scheme); err != nil {
return err
}
if instance.Spec.Ingress != nil {
if err := mergo.Merge(ingress, instance.Spec.Ingress); err != nil {
return err
}
}
ingress.ObjectMeta.Labels = labels
var prefixPathType = networkingv1.PathTypePrefix
rules := []networkingv1.IngressRule{
{
Host: instance.Spec.Host,
IngressRuleValue: networkingv1.IngressRuleValue{
HTTP: &networkingv1.HTTPIngressRuleValue{
Paths: []networkingv1.HTTPIngressPath{
{
Path: "/",
// PathType: (*networkingv1.PathType)(pointer.String(string(networkingv1.PathTypePrefix))),
PathType: &prefixPathType,
Backend: networkingv1.IngressBackend{
Service: &networkingv1.IngressServiceBackend{
Name: service.GetName(),
Port: networkingv1.ServiceBackendPort{
Name: "server",
},
},
},
},
},
},
},
},
}
if err := mergo.Merge(&ingress.Spec.Rules, rules, mergo.WithOverride); err != nil {
return err
}
tls := []networkingv1.IngressTLS{
{
Hosts: []string{
instance.Spec.Host,
},
SecretName: fmt.Sprintf("%s-certificates", instance.GetName()),
},
}
if err := mergo.Merge(&ingress.Spec.TLS, tls, mergo.WithOverride); err != nil {
return err
}
return nil
}); err != nil {
log.Error(err, "Fail to reconcile Ingress", "ingress", ingress)
r.recorder.Event(instance, "Warning", "Failed", fmt.Sprintf("Fail to reconcile Ingress %s", ingress.Name))
} else {
switch op {
case controllerutil.OperationResultCreated:
r.recorder.Event(instance, "Normal", "Created", fmt.Sprintf("Created Ingress %s", ingress.Name))
case controllerutil.OperationResultUpdated:
r.recorder.Event(instance, "Normal", "Updated", fmt.Sprintf("Updated Ingress %s", ingress.Name))
}
}
return ctrl.Result{}, nil
}
// SetupWithManager sets up the controller with the Manager.
func (r *ServerReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.recorder = mgr.GetEventRecorderFor("server-controller")
return ctrl.NewControllerManagedBy(mgr).
For(&headscalev1alpha1.Server{}).
// Owns(&certmanagerv1.Certificate{}).
Owns(&corev1.ConfigMap{}).
Owns(&corev1.Service{}).
Owns(&appsv1.Deployment{}).
Owns(&networkingv1.Ingress{}).
Complete(r)
}

View File

@ -0,0 +1,144 @@
package controllers_test
import (
"context"
"errors"
"time"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/tidwall/gjson"
corev1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/controllers"
"github.com/guilhem/headscale-operator/pkg/headscale"
)
var _ = Describe("Controllers/ServerController", func() {
const (
InstanceName = "test-server"
InstanceNamespace = "default"
Host = "test.domain.com"
timeout = time.Second * 30
duration = "10s"
interval = "1s"
)
Context("When creating new server", func() {
It("Should create Server", func() {
ctx := context.Background()
By("Creating a new Instance")
instance := &headscalev1alpha1.Server{
ObjectMeta: metav1.ObjectMeta{
Name: InstanceName,
Namespace: InstanceNamespace,
},
Spec: headscalev1alpha1.ServerSpec{
Version: "0.15.O",
Ingress: &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{},
Spec: networkingv1.IngressSpec{
IngressClassName: pointer.String("ingress-class"),
},
},
Config: headscale.Config{
LogLevel: "debug",
},
Host: Host,
},
}
// ingress := &networkingv1.Ingress{
// ObjectMeta: metav1.ObjectMeta{},
// Spec: networkingv1.IngressSpec{
// IngressClassName: pointer.String("ingressClass"),
// },
// }
Expect(k8sClient.Create(ctx, instance)).Should(Succeed())
By("Should have deployment")
Eventually(func() (int, error) {
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(instance), instance); err != nil {
return -1, err
}
return len(instance.Status.DeploymentName), nil
}, duration, interval).ShouldNot(Equal(0))
By("Should have the rigth configuration")
Eventually(func() (string, error) {
confimap := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: InstanceName,
Namespace: InstanceNamespace,
},
}
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(confimap), confimap); err != nil {
return "", err
}
json := confimap.Data[controllers.ConfigFileName]
if !gjson.Valid(json) {
return "", errors.New("not valid json")
}
logLevel := gjson.Get(json, "log_level")
return logLevel.String(), nil
}, duration, interval).Should(Equal("debug"))
By("Should have ingress")
ingress := &networkingv1.Ingress{
ObjectMeta: metav1.ObjectMeta{
Name: InstanceName,
Namespace: InstanceNamespace,
},
}
Eventually(func() (int, error) {
if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(ingress), ingress); err != nil {
return -1, err
}
size := len(ingress.Spec.Rules)
if size == 0 {
return size, errors.New("no ingress rules")
}
if ingress.Spec.Rules[0].Host != Host {
return size, errors.New("host mismatch")
}
if *ingress.Spec.IngressClassName != "ingress-class" {
return size, errors.New("missing ingress class")
}
return size, nil
}, duration, interval).Should(Equal(1))
// By("Should have pod ready")
// createdDeployment := &appsv1.Deployment{}
// Eventually(func() (int, error) {
// if err := k8sClient.Get(ctx, types.NamespacedName{Name: instance.Status.DeploymentName, Namespace: instance.Namespace}, createdDeployment); err != nil {
// return -1, err
// }
// return int(createdDeployment.Status.AvailableReplicas), nil
// }, duration, interval).ShouldNot(BeZero())
})
})
})

118
controllers/suite_test.go Normal file
View File

@ -0,0 +1,118 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers_test
import (
"context"
"path/filepath"
"testing"
"time"
ctrl "sigs.k8s.io/controller-runtime"
certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/controllers"
//+kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var (
cfg *rest.Config
k8sClient client.Client
testEnv *envtest.Environment
ctx context.Context
cancel context.CancelFunc
)
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func() {
logger := zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))
logf.SetLogger(logger)
ctx, cancel = context.WithTimeout(context.TODO(), time.Minute)
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{
filepath.Join("..", "config", "crd", "bases"),
filepath.Join("testing-assets", "crd"),
},
ErrorIfCRDPathMissing: true,
AttachControlPlaneOutput: false,
}
var err error
// cfg is defined in this file globally.
cfg, err = testEnv.Start()
Expect(err).NotTo(HaveOccurred())
Expect(cfg).NotTo(BeNil())
err = headscalev1alpha1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
Expect(certmanagerv1.AddToScheme(scheme.Scheme)).NotTo(HaveOccurred())
//+kubebuilder:scaffold:scheme
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).NotTo(HaveOccurred())
Expect(k8sClient).NotTo(BeNil())
ctrl.SetLogger(logger)
k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&controllers.ServerReconciler{
Client: k8sManager.GetClient(),
Scheme: k8sManager.GetScheme(),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
defer GinkgoRecover()
err = k8sManager.Start(ctx)
Expect(err).ToNot(HaveOccurred(), "failed to run manager")
}()
})
var _ = AfterSuite(func() {
cancel()
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).NotTo(HaveOccurred())
})

File diff suppressed because it is too large

91
go.mod Normal file
View File

@ -0,0 +1,91 @@
module github.com/guilhem/headscale-operator
go 1.18
require (
github.com/cert-manager/cert-manager v1.8.0
github.com/imdario/mergo v0.3.13
github.com/juanfont/headscale v0.15.0
github.com/onsi/ginkgo/v2 v2.1.4
github.com/onsi/gomega v1.19.0
golang.org/x/exp v0.0.0-20220518171630-0b5c67f07fdf
google.golang.org/grpc v1.44.0
google.golang.org/protobuf v1.27.1
k8s.io/api v0.24.0
k8s.io/apimachinery v0.24.0
k8s.io/client-go v0.24.0
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9
sigs.k8s.io/controller-runtime v0.12.1-0.20220602164547-f46919744bee
)
require (
cloud.google.com/go v0.99.0 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.20 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.15 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/logger v0.2.1 // indirect
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/PuerkitoBio/purell v1.1.1 // indirect
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful v2.9.5+incompatible // indirect
github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/go-logr/zapr v1.2.0 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.19.5 // indirect
github.com/go-openapi/swag v0.19.14 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.0.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/gnostic v0.5.7-v3refs // indirect
github.com/google/go-cmp v0.5.7 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.3 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/tidwall/gjson v1.14.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.19.1 // indirect
golang.org/x/crypto v0.0.0-20220214200702-86341886e292 // indirect
golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b // indirect
golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8 // indirect
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220228195345-15d65a4533f7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0 // indirect
k8s.io/apiextensions-apiserver v0.24.0 // indirect
k8s.io/component-base v0.24.0 // indirect
k8s.io/klog/v2 v2.60.1 // indirect
k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect
sigs.k8s.io/gateway-api v0.4.1 // indirect
sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)

1187
go.sum Normal file

File diff suppressed because it is too large

15
hack/boilerplate.go.txt Normal file
View File

@ -0,0 +1,15 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

121
main.go Normal file
View File

@ -0,0 +1,121 @@
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"os"
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
certmanagerv1 "github.com/cert-manager/cert-manager/pkg/apis/certmanager/v1"
headscalev1alpha1 "github.com/guilhem/headscale-operator/api/v1alpha1"
"github.com/guilhem/headscale-operator/controllers"
//+kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(headscalev1alpha1.AddToScheme(scheme))
utilruntime.Must(certmanagerv1.AddToScheme(scheme))
//+kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var probeAddr string
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Port: 9443,
HealthProbeBindAddress: probeAddr,
LeaderElection: enableLeaderElection,
LeaderElectionID: "67c702f3.barpilot.io",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err := (&controllers.ServerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Server")
os.Exit(1)
}
if err := (&controllers.NamespaceReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "Namespace")
os.Exit(1)
}
if err := (&controllers.PreAuthKeyReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "PreAuthKey")
os.Exit(1)
}
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
setupLog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}

99
pkg/headscale/config.go Normal file
View File

@ -0,0 +1,99 @@
//+kubebuilder:object:generate=true
package headscale
import (
"crypto/tls"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Config struct {
ServerURL string `json:"server_url,omitempty"`
Addr string `json:"listen_addr,omitempty"`
MetricsAddr string `json:"metrics_listen_addr,omitempty"`
GRPCAddr string `json:"grpc_listen_addr,omitempty"`
GRPCAllowInsecure *bool `json:"grpc_allow_insecure,omitempty"`
EphemeralNodeInactivityTimeout metav1.Duration `json:"ephemeral_node_inactivity_timeout,omitempty"`
IPPrefixes []string `json:"ip_prefixes,omitempty"`
PrivateKeyPath string `json:"private_key_path,omitempty"`
DERP DERPConfig `json:"derp,omitempty"`
DBtype string `json:"db_type,omitempty"`
DBpath string `json:"db_path,omitempty"`
DBhost string `json:"db_host,omitempty"`
DBport int `json:"db_port,omitempty"`
DBname string `json:"db_name,omitempty"`
DBuser string `json:"db_user,omitempty"`
DBpass string `json:"db_pass,omitempty"`
TLSLetsEncryptListen string `json:"tls_letsencrypt_listen,omitempty"`
TLSLetsEncryptHostname string `json:"tls_letsencrypt_hostname,omitempty"`
TLSLetsEncryptCacheDir string `json:"tls_letsencrypt_cache_dir,omitempty"`
TLSLetsEncryptChallengeType string `json:"tls_letsencrypt_challenge_type,omitempty"`
TLSCertPath string `json:"tls_cert_path,omitempty"`
TLSKeyPath string `json:"tls_key_path,omitempty"`
TLSClientAuthMode tls.ClientAuthType `json:"tls_client_auth_mode,omitempty"`
ACMEURL string `json:"acme_url,omitempty"`
ACMEEmail string `json:"acme_email,omitempty"`
DNSConfig DNSConfig `json:"dns_config,omitempty"`
UnixSocket string `json:"unix_socket,omitempty"`
UnixSocketPermission string `json:"unix_socket_permission,omitempty"`
OIDC OIDCConfig `json:"oidc,omitempty"`
LogTail LogTailConfig `json:"logtail,omitempty"`
LogLevel string `json:"log_level,omitempty"`
CLI CLIConfig `json:"cli,omitempty"`
}
type OIDCConfig struct {
Issuer string `json:"issuer,omitempty"`
ClientID string `json:"client_id,omitempty"`
ClientSecret string `json:"client_secret,omitempty"`
Scope []string `json:"scope,omitempty"`
ExtraParams map[string]string `json:"extra_params,omitempty"`
AllowedDomains []string `json:"allowed_domains,omitempty"`
AllowedUsers []string `json:"allowed_users,omitempty"`
StripEmaildomain *bool `json:"strip_email_domain,omitempty"`
}
type DERPConfig struct {
Server DERPConfigServer `json:"server,omitempty"`
AutoUpdate *bool `json:"auto_update_enabled,omitempty"`
URLs []string `json:"urls,omitempty"`
Paths []string `json:"paths,omitempty"`
UpdateFrequency metav1.Duration `json:"update_frequency,omitempty"`
}
type DNSConfig struct {
Magic *bool `json:"magic_dns,omitempty"`
BaseDomain string `json:"base_domain,omitempty"`
Nameservers []string `json:"nameservers,omitempty"`
Domains []string `json:"domains,omitempty"`
}
type DERPConfigServer struct {
Enabled *bool `json:"enabled,omitempty"`
RegionCode string `json:"region_code,omitempty"`
RegionName string `json:"region_name,omitempty"`
STUNAddr string `json:"stun_listen_addr,omitempty"`
RegionID int `json:"region_id,omitempty"`
}
type LogTailConfig struct {
Enabled *bool `json:"enable,omitempty"`
}
type CLIConfig struct {
Insecure *bool `json:"insecure,omitempty"`
Address string `json:"address,omitempty"`
APIKey string `json:"api_key,omitempty"`
Timeout metav1.Duration `json:"timeout,omitempty"`
}
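For orientation: the Server reconciler marshals this struct with json.MarshalIndent into the config.json key of the ConfigMap, so the json tags above are exactly the keys headscale reads. A hedged sketch of what a minimal config renders to (the host is a placeholder):

package main

import (
    "encoding/json"
    "fmt"

    "github.com/guilhem/headscale-operator/pkg/headscale"
)

func main() {
    cfg := headscale.Config{
        ServerURL: "https://headscale.example.com", // placeholder
        Addr:      "0.0.0.0:8080",
        LogLevel:  "info",
    }

    out, err := json.MarshalIndent(cfg, "", "  ")
    if err != nil {
        panic(err)
    }

    // Prints snake_case keys such as "server_url", "listen_addr" and "log_level".
    // Struct-typed fields (derp, dns_config, oidc, cli, ...) still appear even when
    // empty, because omitempty does not apply to struct values.
    fmt.Println(string(out))
}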

View File

@ -0,0 +1,220 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright 2022 Guilhem Lettron.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by controller-gen. DO NOT EDIT.
package headscale
import ()
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CLIConfig) DeepCopyInto(out *CLIConfig) {
*out = *in
if in.Insecure != nil {
in, out := &in.Insecure, &out.Insecure
*out = new(bool)
**out = **in
}
out.Timeout = in.Timeout
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CLIConfig.
func (in *CLIConfig) DeepCopy() *CLIConfig {
if in == nil {
return nil
}
out := new(CLIConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
*out = *in
if in.GRPCAllowInsecure != nil {
in, out := &in.GRPCAllowInsecure, &out.GRPCAllowInsecure
*out = new(bool)
**out = **in
}
out.EphemeralNodeInactivityTimeout = in.EphemeralNodeInactivityTimeout
if in.IPPrefixes != nil {
in, out := &in.IPPrefixes, &out.IPPrefixes
*out = make([]string, len(*in))
copy(*out, *in)
}
in.DERP.DeepCopyInto(&out.DERP)
in.DNSConfig.DeepCopyInto(&out.DNSConfig)
in.OIDC.DeepCopyInto(&out.OIDC)
in.LogTail.DeepCopyInto(&out.LogTail)
in.CLI.DeepCopyInto(&out.CLI)
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
if in == nil {
return nil
}
out := new(Config)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DERPConfig) DeepCopyInto(out *DERPConfig) {
*out = *in
in.Server.DeepCopyInto(&out.Server)
if in.AutoUpdate != nil {
in, out := &in.AutoUpdate, &out.AutoUpdate
*out = new(bool)
**out = **in
}
if in.URLs != nil {
in, out := &in.URLs, &out.URLs
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Paths != nil {
in, out := &in.Paths, &out.Paths
*out = make([]string, len(*in))
copy(*out, *in)
}
out.UpdateFrequency = in.UpdateFrequency
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DERPConfig.
func (in *DERPConfig) DeepCopy() *DERPConfig {
if in == nil {
return nil
}
out := new(DERPConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DERPConfigServer) DeepCopyInto(out *DERPConfigServer) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DERPConfigServer.
func (in *DERPConfigServer) DeepCopy() *DERPConfigServer {
if in == nil {
return nil
}
out := new(DERPConfigServer)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DNSConfig) DeepCopyInto(out *DNSConfig) {
*out = *in
if in.Magic != nil {
in, out := &in.Magic, &out.Magic
*out = new(bool)
**out = **in
}
if in.Nameservers != nil {
in, out := &in.Nameservers, &out.Nameservers
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Domains != nil {
in, out := &in.Domains, &out.Domains
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfig.
func (in *DNSConfig) DeepCopy() *DNSConfig {
if in == nil {
return nil
}
out := new(DNSConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LogTailConfig) DeepCopyInto(out *LogTailConfig) {
*out = *in
if in.Enabled != nil {
in, out := &in.Enabled, &out.Enabled
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogTailConfig.
func (in *LogTailConfig) DeepCopy() *LogTailConfig {
if in == nil {
return nil
}
out := new(LogTailConfig)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OIDCConfig) DeepCopyInto(out *OIDCConfig) {
*out = *in
if in.Scope != nil {
in, out := &in.Scope, &out.Scope
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ExtraParams != nil {
in, out := &in.ExtraParams, &out.ExtraParams
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.AllowedDomains != nil {
in, out := &in.AllowedDomains, &out.AllowedDomains
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AllowedUsers != nil {
in, out := &in.AllowedUsers, &out.AllowedUsers
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.StripEmaildomain != nil {
in, out := &in.StripEmaildomain, &out.StripEmaildomain
*out = new(bool)
**out = **in
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OIDCConfig.
func (in *OIDCConfig) DeepCopy() *OIDCConfig {
if in == nil {
return nil
}
out := new(OIDCConfig)
in.DeepCopyInto(out)
return out
}

37
pkg/utils/grpc.go Normal file
View File

@ -0,0 +1,37 @@
package utils
import (
"context"
headscaleapiv1 "github.com/juanfont/headscale/gen/go/headscale/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials/insecure"
)
func NewHeadscaleServiceClient(ctx context.Context, address string) (headscaleapiv1.HeadscaleServiceClient, error) {
grpcOptions := []grpc.DialOption{
grpc.WithBlock(),
}
grpcOptions = append(grpcOptions,
grpc.WithTransportCredentials(insecure.NewCredentials()),
)
// tlsConfig := &tls.Config{
// // turn of gosec as we are intentionally setting
// // insecure.
// //nolint:gosec
// InsecureSkipVerify: true,
// }
// grpcOptions = append(grpcOptions,
// grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),
// )
conn, err := grpc.DialContext(ctx, address, grpcOptions...)
if err != nil {
return nil, err
}
return headscaleapiv1.NewHeadscaleServiceClient(conn), nil
}
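Since the dial options include grpc.WithBlock, DialContext does not return until the connection is up or the context expires; the controllers therefore wrap this call in a 10-second timeout. A usage sketch along those lines, where the address stands in for a Server's Status.GrpcAddress:

package main

import (
    "context"
    "log"
    "time"

    v1 "github.com/juanfont/headscale/gen/go/headscale/v1"

    "github.com/guilhem/headscale-operator/pkg/utils"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()

    // "10.0.0.1:8082" is a hypothetical ClusterIP plus the grpc-insecure port.
    client, err := utils.NewHeadscaleServiceClient(ctx, "10.0.0.1:8082")
    if err != nil {
        log.Fatal(err)
    }

    if _, err := client.GetNamespace(ctx, &v1.GetNamespaceRequest{Name: "default"}); err != nil {
        log.Fatal(err)
    }
}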

35
pkg/utils/utils.go Normal file
View File

@ -0,0 +1,35 @@
package utils
import (
"net"
"strconv"
"strings"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
func SliptHostPort(hostport string) (string, int, error) {
host, sListenPort, err := net.SplitHostPort(hostport)
if err != nil {
return "", 0, err
}
listenPort, err := strconv.Atoi(sListenPort)
if err != nil {
return "", 0, err
}
return host, listenPort, nil
}
func IgnoreNotFound(err error) error {
status, ok := status.FromError(err)
if !ok {
return err
}
if (status.Code() == codes.NotFound) || strings.Contains(status.Message(), "not found") {
return nil
}
return err
}
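A short usage sketch for the two helpers above (illustrative only): SliptHostPort wraps net.SplitHostPort and converts the port to an int, and IgnoreNotFound maps gRPC NotFound errors to nil so the reconcilers can treat an already-deleted headscale resource as success.

package main

import (
    "fmt"

    "github.com/guilhem/headscale-operator/pkg/utils"
)

func main() {
    host, port, err := utils.SliptHostPort("0.0.0.0:8080")
    if err != nil {
        panic(err)
    }
    fmt.Println(host, port) // 0.0.0.0 8080

    // nil (and gRPC NotFound errors) come back as nil; anything else is returned as-is.
    fmt.Println(utils.IgnoreNotFound(nil)) // <nil>
}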