initial commit

This commit is contained in:
Fishandchips321 2026-04-15 14:07:27 +01:00
commit dc7d39e5f1
12 changed files with 810 additions and 0 deletions

442
Headscale/config.yml Normal file
View file

@ -0,0 +1,442 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: headscale-config
data:
  config.yaml: |
    # headscale will look for a configuration file named `config.yaml` (or `config.json`)
    # in the following order:
    #
    # - `/etc/headscale`
    # - `~/.headscale`
    # - current working directory

    # The url clients will connect to.
    # Typically this will be a domain like:
    #
    # https://myheadscale.example.com:443
    #
    server_url: https://headscale.foxhawk.co.uk

    # Address to listen to / bind to on the server
    #
    # For production:
    listen_addr: 0.0.0.0:8080
    # listen_addr: 127.0.0.1:8080

    # Address to listen to /metrics and /debug, you may want
    # to keep this endpoint private to your internal network
    # Use an empty value to disable the metrics listener.
    metrics_listen_addr: 127.0.0.1:9090

    # Address to listen for gRPC.
    # gRPC is used for controlling a headscale server
    # remotely with the CLI
    # Note: Remote access _only_ works if you have
    # valid certificates.
    #
    # For production:
    # grpc_listen_addr: 0.0.0.0:50443
    grpc_listen_addr: 127.0.0.1:50443

    # Allow the gRPC admin interface to run in INSECURE
    # mode. This is not recommended as the traffic will
    # be unencrypted. Only enable if you know what you
    # are doing.
    grpc_allow_insecure: false

    # The Noise section includes specific configuration for the
    # TS2021 Noise protocol
    noise:
      # The Noise private key is used to encrypt the traffic between headscale and
      # Tailscale clients when using the new Noise-based protocol. A missing key
      # will be automatically generated.
      private_key_path: /var/lib/headscale/noise_private.key

    # List of IP prefixes to allocate tailaddresses from.
    # Each prefix consists of either an IPv4 or IPv6 address,
    # and the associated prefix length, delimited by a slash.
    # It must be within IP ranges supported by the Tailscale
    # client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48.
    # Any other range is NOT supported, and it will cause unexpected issues.
    prefixes:
      v4: 100.64.0.0/10
      v6: fd7a:115c:a1e0::/48

      # Strategy used for allocation of IPs to nodes, available options:
      # - sequential (default): assigns the next free IP from the previous given
      #   IP. A best-effort approach is used and Headscale might leave holes in the
      #   IP range or fill up existing holes in the IP range.
      # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand).
      allocation: sequential

    # DERP is a relay system that Tailscale uses when a direct
    # connection cannot be established.
    # https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp
    #
    # headscale needs a list of DERP servers that can be presented
    # to the clients.
    derp:
      server:
        # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config
        # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place
        enabled: false

        # Region ID to use for the embedded DERP server.
        # The local DERP prevails if the region ID collides with other region ID coming from
        # the regular DERP config.
        region_id: 999

        # Region code and name are displayed in the Tailscale UI to identify a DERP region
        region_code: "headscale"
        region_name: "Headscale Embedded DERP"

        # Only allow clients associated with this server access
        verify_clients: true

        # Listens over UDP at the configured address for STUN connections - to help with NAT traversal.
        # When the embedded DERP server is enabled stun_listen_addr MUST be defined.
        #
        # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/
        stun_listen_addr: "0.0.0.0:3478"

        # Private key used to encrypt the traffic between headscale DERP and
        # Tailscale clients. A missing key will be automatically generated.
        private_key_path: /var/lib/headscale/derp_server_private.key

        # This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically,
        # it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths
        # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths
        automatically_add_embedded_derp_region: true

        # For better connection stability (especially when using an Exit-Node and DNS is not working),
        # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using:
        # NOTE(review): 198.51.100.1 and 2001:db8::1 are RFC 5737 / RFC 3849
        # *documentation* addresses — replace with this server's real public IPs
        # before enabling the embedded DERP server.
        ipv4: 198.51.100.1
        ipv6: 2001:db8::1

      # List of externally available DERP maps encoded in JSON
      urls:
        - https://controlplane.tailscale.com/derpmap/default

      # Locally available DERP map files encoded in YAML
      #
      # This option is mostly interesting for people hosting
      # their own DERP servers:
      # https://tailscale.com/kb/1118/custom-derp-servers/
      #
      # paths:
      #   - /etc/headscale/derp-example.yaml
      paths: []

      # If enabled, a worker will be set up to periodically
      # refresh the given sources and update the derpmap
      # will be set up.
      auto_update_enabled: true

      # How often should we check for DERP updates?
      update_frequency: 3h

    # Disables the automatic check for headscale updates on startup
    disable_check_updates: false

    # Time before an inactive ephemeral node is deleted?
    ephemeral_node_inactivity_timeout: 30m

    database:
      # Database type. Available options: sqlite, postgres
      # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
      # All new development, testing and optimisations are done with SQLite in mind.
      type: sqlite

      # Enable debug mode. This setting requires the log.level to be set to "debug" or "trace".
      debug: false

      # GORM configuration settings.
      gorm:
        # Enable prepared statements.
        prepare_stmt: true

        # Enable parameterized queries.
        parameterized_queries: true

        # Skip logging "record not found" errors.
        skip_err_record_not_found: true

        # Threshold for slow queries in milliseconds.
        slow_threshold: 1000

      # SQLite config
      sqlite:
        path: /var/lib/headscale/db.sqlite

        # Enable WAL mode for SQLite. This is recommended for production environments.
        # https://www.sqlite.org/wal.html
        write_ahead_log: true

        # Maximum number of WAL file frames before the WAL file is automatically checkpointed.
        # https://www.sqlite.org/c3ref/wal_autocheckpoint.html
        # Set to 0 to disable automatic checkpointing.
        wal_autocheckpoint: 1000

      # # Postgres config
      # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons.
      # See database.type for more information.
      # postgres:
      #   # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank.
      #   host: localhost
      #   port: 5432
      #   name: headscale
      #   user: foo
      #   pass: bar
      #   max_open_conns: 10
      #   max_idle_conns: 10
      #   conn_max_idle_time_secs: 3600
      #   # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need
      #   # in the 'ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1.
      #   ssl: false

    ### TLS configuration
    #
    ## Let's encrypt / ACME
    #
    # headscale supports automatically requesting and setting up
    # TLS for a domain with Let's Encrypt.
    #
    # URL to ACME directory
    # acme_url: https://acme-v02.api.letsencrypt.org/directory
    # Email to register with ACME provider
    # acme_email: ""
    # Domain name to request a TLS certificate for:
    # tls_letsencrypt_hostname: ""
    # Path to store certificates and metadata needed by
    # letsencrypt
    # For production:
    # tls_letsencrypt_cache_dir: /var/lib/headscale/cache
    # Type of ACME challenge to use, currently supported types:
    # HTTP-01 or TLS-ALPN-01
    # See: docs/ref/tls.md for more information
    # tls_letsencrypt_challenge_type: HTTP-01
    # When HTTP-01 challenge is chosen, letsencrypt must set up a
    # verification endpoint, and it will be listening on:
    # :http = port 80
    # tls_letsencrypt_listen: ":http"
    ## Use already defined certificates:
    # tls_cert_path: ""
    # tls_key_path: ""

    log:
      # Valid log levels: panic, fatal, error, warn, info, debug, trace
      level: info
      # Output formatting for logs: text or json
      format: text

    ## Policy
    # headscale supports Tailscale's ACL policies.
    # Please have a look to their KB to better
    # understand the concepts: https://tailscale.com/kb/1018/acls/
    policy:
      # The mode can be "file" or "database" that defines
      # where the ACL policies are stored and read from.
      mode: file
      # If the mode is set to "file", the path to a
      # HuJSON file containing ACL policies.
      path: ""

    ## DNS
    #
    # headscale supports Tailscale's DNS configuration and MagicDNS.
    # Please have a look to their KB to better understand the concepts:
    #
    # - https://tailscale.com/kb/1054/dns/
    # - https://tailscale.com/kb/1081/magicdns/
    # - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/
    #
    # Please note that for the DNS configuration to have any effect,
    # clients must have the `--accept-dns=true` option enabled. This is the
    # default for the Tailscale client.
    #
    # Setting _any_ of the configuration and `--accept-dns=true` on the
    # clients will integrate with the DNS manager on the client or
    # overwrite /etc/resolv.conf.
    # https://tailscale.com/kb/1235/resolv-conf
    #
    # If you want stop Headscale from managing the DNS configuration
    # all the fields under `dns` should be set to empty values.
    dns:
      # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
      magic_dns: true

      # Defines the base domain to create the hostnames for MagicDNS.
      # This domain _must_ be different from the server_url domain.
      # `base_domain` must be a FQDN, without the trailing dot.
      # The FQDN of the hosts will be
      # `hostname.base_domain` (e.g., _myhost.example.com_).
      base_domain: foxhawk.vpn

      # Whether to use the local DNS settings of a node or override the local DNS
      # settings (default) and force the use of Headscale's DNS configuration.
      override_local_dns: true

      # List of DNS servers to expose to clients.
      nameservers:
        global:
          - 1.1.1.1
          - 1.0.0.1
          - 2606:4700:4700::1111
          - 2606:4700:4700::1001
          # NextDNS (see https://tailscale.com/kb/1218/nextdns/).
          # "abc123" is example NextDNS ID, replace with yours.
          # - https://dns.nextdns.io/abc123

        # Split DNS (see https://tailscale.com/kb/1054/dns/),
        # a map of domains and which DNS server to use for each.
        split: {}
          # foo.bar.com:
          #   - 1.1.1.1
          # darp.headscale.net:
          #   - 1.1.1.1
          #   - 8.8.8.8

      # Set custom DNS search domains. With MagicDNS enabled,
      # your tailnet base_domain is always the first search domain.
      search_domains: []

      # Extra DNS records
      # so far only A and AAAA records are supported (on the tailscale side)
      # See: docs/ref/dns.md
      extra_records: []
      #   - name: "grafana.myvpn.example.com"
      #     type: "A"
      #     value: "100.64.0.3"
      #
      #   # you can also put it in one line
      #   - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" }
      #
      # Alternatively, extra DNS records can be loaded from a JSON file.
      # Headscale processes this file on each change.
      # extra_records_path: /var/lib/headscale/extra-records.json

    # Unix socket used for the CLI to connect without authentication
    # Note: for production you will want to set this to something like:
    unix_socket: /var/run/headscale/headscale.sock
    unix_socket_permission: "0770"

    # OpenID Connect
    # oidc:
    #   # Block startup until the identity provider is available and healthy.
    #   only_start_if_oidc_is_available: true
    #
    #   # OpenID Connect Issuer URL from the identity provider
    #   issuer: "https://your-oidc.issuer.com/path"
    #
    #   # Client ID from the identity provider
    #   client_id: "your-oidc-client-id"
    #
    #   # Client secret generated by the identity provider
    #   # Note: client_secret and client_secret_path are mutually exclusive.
    #   client_secret: "your-oidc-client-secret"
    #   # Alternatively, set `client_secret_path` to read the secret from the file.
    #   # It resolves environment variables, making integration to systemd's
    #   # `LoadCredential` straightforward:
    #   client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret"
    #
    #   # The amount of time a node is authenticated with OpenID until it expires
    #   # and needs to reauthenticate.
    #   # Setting the value to "0" will mean no expiry.
    #   expiry: 180d
    #
    #   # Use the expiry from the token received from OpenID when the user logged
    #   # in. This will typically lead to frequent need to reauthenticate and should
    #   # only be enabled if you know what you are doing.
    #   # Note: enabling this will cause `oidc.expiry` to be ignored.
    #   use_expiry_from_token: false
    #
    #   # The OIDC scopes to use, defaults to "openid", "profile" and "email".
    #   # Custom scopes can be configured as needed, be sure to always include the
    #   # required "openid" scope.
    #   scope: ["openid", "profile", "email"]
    #
    #   # Only verified email addresses are synchronized to the user profile by
    #   # default. Unverified emails may be allowed in case an identity provider
    #   # does not send the "email_verified: true" claim or email verification is
    #   # not required.
    #   email_verified_required: true
    #
    #   # Provide custom key/value pairs which get sent to the identity provider's
    #   # authorization endpoint.
    #   extra_params:
    #     domain_hint: example.com
    #
    #   # Only accept users whose email domain is part of the allowed_domains list.
    #   allowed_domains:
    #     - example.com
    #
    #   # Only accept users whose email address is part of the allowed_users list.
    #   allowed_users:
    #     - alice@example.com
    #
    #   # Only accept users which are members of at least one group in the
    #   # allowed_groups list.
    #   allowed_groups:
    #     - /headscale
    #
    #   # Optional: PKCE (Proof Key for Code Exchange) configuration
    #   # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow
    #   # by preventing authorization code interception attacks
    #   # See https://datatracker.ietf.org/doc/html/rfc7636
    #   pkce:
    #     # Enable or disable PKCE support (default: false)
    #     enabled: false
    #
    #     # PKCE method to use:
    #     # - plain: Use plain code verifier
    #     # - S256: Use SHA256 hashed code verifier (default, recommended)
    #     method: S256

    # Logtail configuration
    # Logtail is Tailscales logging and auditing infrastructure, it allows the
    # control panel to instruct tailscale nodes to log their activity to a remote
    # server. To disable logging on the client side, please refer to:
    # https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging
    logtail:
      # Enable logtail for tailscale nodes of this Headscale instance.
      # As there is currently no support for overriding the log server in Headscale, this is
      # disabled by default. Enabling this will make your clients send logs to Tailscale Inc.
      enabled: false

    # Enabling this option makes devices prefer a random port for WireGuard traffic over the
    # default static port 41641. This option is intended as a workaround for some buggy
    # firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information.
    randomize_client_port: false

    # Taildrop configuration
    # Taildrop is the file sharing feature of Tailscale, allowing nodes to send files to each other.
    # https://tailscale.com/kb/1106/taildrop/
    taildrop:
      # Enable or disable Taildrop for all nodes.
      # When enabled, nodes can send files to other nodes owned by the same user.
      # Tagged devices and cross-user transfers are not permitted by Tailscale clients.
      enabled: true

    # Advanced performance tuning parameters.
    # The defaults are carefully chosen and should rarely need adjustment.
    # Only modify these if you have identified a specific performance issue.
    #
    # tuning:
    #   # NodeStore write batching configuration.
    #   # The NodeStore batches write operations before rebuilding peer relationships,
    #   # which is computationally expensive. Batching reduces rebuild frequency.
    #   #
    #   # node_store_batch_size: 100
    #   # node_store_batch_timeout: 500ms

54
Headscale/deployment.yml Normal file
View file

@ -0,0 +1,54 @@
---
# Headscale control server.
# Config is mounted read-only from the headscale-config ConfigMap;
# state (SQLite DB, noise key) lives on the headscale-data PVC.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: headscale
spec:
  selector:
    matchLabels:
      app: headscale
  template:
    metadata:
      labels:
        app: headscale
    spec:
      containers:
        - name: headscale
          image: docker.io/headscale/headscale:latest
          command:
            - headscale
            - serve
          resources:
            limits:
              memory: "128Mi"
              cpu: "500m"
          ports:
            - containerPort: 8080  # HTTP / Noise endpoint
            - containerPort: 9090  # /metrics and /debug
          volumeMounts:
            - name: headscale-config
              mountPath: /etc/headscale
              readOnly: true
            - name: headscale-data
              mountPath: /var/lib/headscale
            # FIX(review): this emptyDir was declared but never mounted.
            # Mounting it at /var/run/headscale gives the CLI unix socket
            # (unix_socket: /var/run/headscale/headscale.sock in the config)
            # a writable tmpfs location.
            - name: headscale-temp
              mountPath: /var/run/headscale
      volumes:
        - name: headscale-config
          configMap:
            name: headscale-config
        - name: headscale-data
          persistentVolumeClaim:
            claimName: headscale-data
        - name: headscale-temp
          emptyDir:
            # FIX: the API only accepts "" or "Memory" here;
            # lowercase "memory" fails validation.
            medium: Memory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: headscale-data
spec:
  resources:
    requests:
      storage: 512Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce

16
Headscale/ingress.yml Normal file
View file

@ -0,0 +1,16 @@
---
# Ingress for the Headscale control plane.
# FIX(review): server_url in the headscale config is https, but this ingress
# had no TLS stanza (unlike the Kavita and Mealie ingresses in this repo).
# Added the same cert-manager issuer + TLS secret pattern for consistency.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: headscale
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-production"
spec:
  tls:
    - hosts:
        - headscale.foxhawk.co.uk
      secretName: headscale-tls
  rules:
    - host: headscale.foxhawk.co.uk
      http:
        paths:
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: headscale
                port:
                  number: 80

10
Headscale/service.yml Normal file
View file

@ -0,0 +1,10 @@
---
# ClusterIP service fronting headscale: port 80 -> container port 8080.
apiVersion: v1
kind: Service
metadata:
  name: headscale
spec:
  selector:
    app: headscale
  ports:
    - port: 80
      targetPort: 8080

62
Kavita/deployment.yml Normal file
View file

@ -0,0 +1,62 @@
---
# Kavita book/comic server.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kavita
spec:
  selector:
    matchLabels:
      app: kavita
  template:
    metadata:
      labels:
        app: kavita
    spec:
      containers:
        - name: kavita
          image: jvmilazz0/kavita:latest
          resources:
            limits:
              memory: "1Gi"
              cpu: "1"
          ports:
            - containerPort: 5000
              name: http
              protocol: TCP
          volumeMounts:
            - name: kavita-config
              mountPath: /kavita/config
            - name: kavita-data
              mountPath: /kavita/library
          # readinessProbe:
          #   httpGet:
          #     path: /api/health
          #     port: http
          #   failureThreshold: 5
          #   periodSeconds: 10
          # livenessProbe:
          #   httpGet:
          #     path: /api/health
          #     port: http
          #   failureThreshold: 5
          #   periodSeconds: 10
      volumes:
        - name: kavita-config
          # persistentVolumeClaim:
          #   claimName: kavita-config
          hostPath:
            path: /zfs/docker/kavita
        - name: kavita-data
          hostPath:
            path: /mnt/nfs/books
---
# NOTE(review): this PVC is provisioned but currently unused — the
# kavita-config volume above uses a hostPath instead. Keep only if you
# plan to switch back to the commented-out persistentVolumeClaim.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kavita-config
spec:
  resources:
    requests:
      storage: 512Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce

22
Kavita/ingress.yml Normal file
View file

@ -0,0 +1,22 @@
---
# Ingress for Kavita with a cert-manager-issued Let's Encrypt certificate.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kavita
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-production"
spec:
  tls:
    - hosts:
        - kavita.foxhawk.co.uk
      secretName: kavita-tls
  rules:
    - host: kavita.foxhawk.co.uk
      http:
        paths:
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: kavita
                port:
                  number: 80

10
Kavita/service.yml Normal file
View file

@ -0,0 +1,10 @@
---
# ClusterIP service fronting Kavita: port 80 -> container port 5000.
apiVersion: v1
kind: Service
metadata:
  name: kavita
spec:
  selector:
    app: kavita
  ports:
    - port: 80
      targetPort: 5000

57
Mealie/database.yml Normal file
View file

@ -0,0 +1,57 @@
---
# Postgres backing database for Mealie.
# Credentials come from the mealie-db Secret (username/password keys).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mealie-db
spec:
  selector:
    matchLabels:
      app: mealie-db
  template:
    metadata:
      labels:
        app: mealie-db
    spec:
      containers:
        - name: mealie-db
          image: postgres:17
          resources:
            limits:
              memory: "256Mi"
              cpu: "500m"
          env:
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mealie-db
                  key: password
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: mealie-db
                  key: username
            # PGUSER makes the image's pg_isready health tooling use the
            # same account as POSTGRES_USER.
            - name: PGUSER
              valueFrom:
                secretKeyRef:
                  name: mealie-db
                  key: username
            - name: POSTGRES_DB
              value: mealie
          volumeMounts:
            # NOTE(review): mounting the PVC directly at the data dir can
            # fail initdb if the volume contains lost+found; if that happens,
            # set PGDATA to a subdirectory (e.g. /var/lib/postgresql/data/pgdata).
            - mountPath: /var/lib/postgresql/data
              name: mealie-db
      volumes:
        - name: mealie-db
          persistentVolumeClaim:
            claimName: mealie-db
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mealie-db
spec:
  resources:
    requests:
      # FIX(review): was "512M" (decimal); use binary Mi for consistency
      # with the other PVCs in this repo.
      storage: 512Mi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce

10
Mealie/db-service.yml Normal file
View file

@ -0,0 +1,10 @@
---
# ClusterIP service for the Mealie Postgres database (port 5432).
apiVersion: v1
kind: Service
metadata:
  name: mealie-db
spec:
  selector:
    app: mealie-db
  ports:
    - port: 5432
      targetPort: 5432

95
Mealie/deployment.yml Normal file
View file

@ -0,0 +1,95 @@
---
# Mealie recipe manager.
# Uses the mealie-db Postgres service for storage and Authentik for OIDC login.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mealie
spec:
  selector:
    matchLabels:
      app: mealie
  template:
    metadata:
      labels:
        app: mealie
    spec:
      containers:
        - name: mealie
          image: ghcr.io/mealie-recipes/mealie:latest
          resources:
            limits:
              memory: "500Mi"
              cpu: "500m"
          ports:
            - containerPort: 9000
          volumeMounts:
            - mountPath: /app/data
              name: mealie-pvc
          env:
            # Local signup disabled; accounts come via OIDC below.
            - name: ALLOW_SIGNUP
              value: "false"
            - name: PUID
              value: "1000"
            - name: PGID
              value: "1000"
            - name: TZ
              value: Europe/London
            - name: BASE_URL
              value: https://mealie.foxhawk.co.uk
            # --- Database (Postgres via the mealie-db service) ---
            - name: DB_ENGINE
              value: postgres
            - name: POSTGRES_USER
              valueFrom:
                secretKeyRef:
                  name: mealie-db
                  key: username
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mealie-db
                  key: password
            - name: POSTGRES_SERVER
              value: mealie-db
            - name: POSTGRES_PORT
              value: "5432"
            - name: POSTGRES_DB
              value: mealie
            # --- OIDC via Authentik ---
            - name: OIDC_AUTH_ENABLED
              value: "true"
            - name: OIDC_SIGNUP_ENABLED
              value: "true"
            - name: OIDC_CONFIGURATION_URL
              value: "https://authentik.foxhawk.co.uk/application/o/mealie/.well-known/openid-configuration"
            - name: OIDC_CLIENT_ID
              valueFrom:
                secretKeyRef:
                  name: mealie-oidc
                  key: client-id
            - name: OIDC_CLIENT_SECRET
              valueFrom:
                secretKeyRef:
                  name: mealie-oidc
                  key: client-secret
            - name: OIDC_ADMIN_GROUP
              valueFrom:
                secretKeyRef:
                  name: mealie-oidc
                  key: admin-group
            - name: OIDC_AUTO_REDIRECT
              value: "false"
            - name: OIDC_PROVIDER_NAME
              value: "Authentik"
      volumes:
        - name: mealie-pvc
          persistentVolumeClaim:
            claimName: mealie
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mealie
spec:
  resources:
    requests:
      # FIX(review): was "1G" (decimal); use binary Gi for consistency
      # with the other PVCs in this repo.
      storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce

22
Mealie/ingress.yml Normal file
View file

@ -0,0 +1,22 @@
---
# Ingress for Mealie with a cert-manager-issued Let's Encrypt certificate.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: mealie
  annotations:
    cert-manager.io/cluster-issuer: "letsencrypt-production"
spec:
  tls:
    - hosts:
        - mealie.foxhawk.co.uk
      secretName: mealie-tls
  rules:
    - host: mealie.foxhawk.co.uk
      http:
        paths:
          - pathType: Prefix
            path: "/"
            backend:
              service:
                name: mealie
                port:
                  number: 80

10
Mealie/service.yml Normal file
View file

@ -0,0 +1,10 @@
---
# ClusterIP service fronting Mealie: port 80 -> container port 9000.
apiVersion: v1
kind: Service
metadata:
  name: mealie
spec:
  selector:
    app: mealie
  ports:
    - port: 80
      targetPort: 9000