feat(gitea): add backup CronJob with RBAC and NFS support

feat(gitea): create PersistentVolume and PersistentVolumeClaim for Gitea

feat(gitea): add script to create Gitea runner registration token secret

feat(gitea): deploy Gitea Actions runner with Docker socket access

feat(media): deploy JDownloader with Ingress configuration

feat(media): set up Jellyfin media server with NFS and Ingress

feat(media): configure qBittorrent deployment with Ingress

feat(monitoring): add Grafana Loki datasource ConfigMap

feat(monitoring): create Grafana admin credentials secret

feat(monitoring): define PersistentVolumes for monitoring stack

feat(network): implement DDNS CronJob for Porkbun DNS updates

feat(network): create secret for Porkbun DDNS API credentials

feat(network): set up Glances service and Ingress for Debian node

fix(network): patch Pi-hole DNS services with external IPs

feat(network): configure Traefik dashboard Ingress with Authentik auth

feat(network): set up Watch Party service and Ingress for Mac Mini

refactor(values): update Helm values files for various services
This commit is contained in:
Nik Afiq 2026-03-12 21:56:32 +09:00
parent 12f333d5e8
commit 83f46c9748
72 changed files with 345 additions and 258 deletions

View File

@ -1,3 +1,6 @@
# Config for: Environment variables template
# Applied by: Copy to .env and fill in values; sourced by shell scripts
# Porkbun API credentials
PORKBUN_API_KEY=pk1_your_key_here
PORKBUN_SECRET_KEY=sk1_your_key_here
@ -11,4 +14,4 @@ GITEA_RUNNER_TOKEN=your_token_here
# Grafana admin password
GRAFANA_ADMIN_PASSWORD=your_password_here
AUTHENTIK_PROXY_TOKEN=your_token_here
AUTHENTIK_PROXY_TOKEN=your_token_here

View File

@ -27,25 +27,58 @@ Infrastructure-as-Code for a 3-machine homelab running K3s.
| Service | URL | Notes |
|---|---|---|
| Traefik | — | Ingress controller, Let's Encrypt |
| Authentik | `https://auth.home.arpa` | SSO/identity provider |
| Gitea | `https://gitea.home.arpa` | Git + Docker registry, SSH on port 2222 |
| Pi-hole | `https://pihole.home.arpa/admin` | Primary DNS, resolves `*.home.arpa` → 192.168.7.77 |
| Grafana | `https://grafana.home.arpa` | Monitoring dashboards (kube-prometheus-stack) |
| Jellyfin | `https://jellyfin.home.arpa` | Media server |
| qBittorrent | `https://qbittorrent.home.arpa` | Torrent client |
| JDownloader | `https://jdownloader.home.arpa` | Download manager |
| Dashy | `https://dashy.home.arpa` | Dashboard |
| Glances | `https://glances.home.arpa` | System monitoring |
## Repo Structure
```
ansible/
inventory.yaml # host definitions
inventory.yaml # host definitions
playbooks/
bootstrap-minisforum.yaml # OS hardening, packages, UFW, /data dirs
setup-k3s.yaml # K3s server install, Helm, kubeconfig
bootstrap-minisforum.yaml # OS hardening, packages, UFW, /data dirs
deploy-watch-party.yaml # deploy watch-party app
join-debian-agent.yaml # join Debian as K3s agent
setup-gitea-runner.yaml # set up Gitea Actions runner
setup-glances-debian.yaml # deploy Glances on Debian host
setup-k3s.yaml # K3s server install, Helm, kubeconfig
setup-monitoring.yaml # deploy monitoring stack
setup-nfs-debian.yaml # configure NFS server on Debian
roles/
common/ # user, SSH hardening, UFW, base packages
k3s-server/ # K3s server install + Helm
common/ # user, SSH hardening, UFW, base packages
gitea-runner/ # Gitea Actions runner setup
glances/ # Glances system monitor
k3s-agent/ # K3s agent node join
k3s-server/ # K3s server install + Helm
monitoring/ # Prometheus/Grafana monitoring
nfs-server/ # NFS server configuration
watch-party/ # Watch-party app deployment
config/
dashy/conf.yaml # Dashy dashboard config
manifests/
authentik/ # Authentik ingress, middleware, proxy outpost, secrets
cert-manager/ # ClusterIssuers and porkbun-secret.sh
core/ # Dashy, Glances, CA installer, apply-dashy-config.sh
gitea/ # Gitea PV, runner, backup, runner secret
media/ # Jellyfin, qBittorrent, JDownloader
monitoring/ # Grafana/Loki datasource, PVs, grafana-secret.sh
network/ # DDNS, Traefik dashboard, ingress routes, pihole patch
values/
traefik.yaml ✅ deployed
gitea.yaml 🔧 in progress
pihole.yaml 🔧 in progress
old.debian-data/ # gitignored — backup of pre-migration configs
authentik.yaml # Authentik SSO
cert-manager.yaml # cert-manager
gitea.yaml # Gitea
kube-prometheus-stack.yaml # Prometheus + Grafana
loki-stack.yaml # Loki log aggregation
pihole.yaml # Pi-hole (Minisforum)
pihole-debian.yaml # Pi-hole (Debian)
traefik.yaml # Traefik ingress controller
```
## Prerequisites
@ -92,8 +125,29 @@ helm repo add mojo2600 https://mojo2600.github.io/pihole-kubernetes/ && helm rep
helm upgrade --install pihole mojo2600/pihole \
--namespace pihole --create-namespace \
-f values/pihole.yaml
# cert-manager
helm repo add jetstack https://charts.jetstack.io && helm repo update
helm upgrade --install cert-manager jetstack/cert-manager \
--namespace cert-manager --create-namespace \
-f values/cert-manager.yaml
# Authentik
helm repo add authentik https://charts.goauthentik.io && helm repo update
helm upgrade --install authentik authentik/authentik \
--namespace authentik --create-namespace \
-f values/authentik.yaml
# kube-prometheus-stack
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts && helm repo update
helm upgrade --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
--namespace monitoring --create-namespace \
-f values/kube-prometheus-stack.yaml
# Loki
helm repo add grafana https://grafana.github.io/helm-charts && helm repo update
helm upgrade --install loki grafana/loki-stack \
--namespace monitoring --create-namespace \
-f values/loki-stack.yaml
```
## See Also
- [migration-plan.md](migration-plan.md) — full phase-by-phase migration plan

View File

@ -1,3 +1,6 @@
# Config for: Ansible
# Applied by: Automatically loaded by ansible-playbook when run from repo root
[defaults]
inventory = ansible/inventory.yaml
roles_path = ansible/roles

View File

@ -1,3 +1,5 @@
# Config for: Ansible inventory
# Applied by: Referenced by all ansible-playbook commands with -i flag
all:
vars:
ansible_user: nik
@ -19,4 +21,4 @@ all:
mac_mini:
hosts:
mac-mini:
ansible_host: 192.168.7.96
ansible_host: 192.168.7.96

View File

@ -1,13 +1,6 @@
---
# Run: ansible-playbook -i ansible/inventory.yaml ansible/playbooks/bootstrap-minisforum.yaml
# Requires: SSH access to 192.168.7.7 as root (or a user with NOPASSWD sudo)
#
# What this does:
# - Creates the 'nik' user with sudo access
# - Hardens SSH (no password auth, no root login)
# - Installs base packages
# - Configures UFW firewall
# - Creates /data/* directories for persistent volumes
# Run: ansible-playbook ansible/playbooks/bootstrap-minisforum.yaml -i ansible/inventory.yaml
# Description: Bootstraps the Minisforum server with user setup, SSH hardening, base packages, UFW firewall, and persistent data directories.
- name: Bootstrap Minisforum
hosts: minisforum

View File

@ -1,18 +1,10 @@
---
# Run: ansible-playbook ansible/playbooks/deploy-watch-party.yaml
#
# What this does:
# - Pulls latest watch-party repo from Gitea
# - Starts containers via Docker Compose using registry images
#
# Prerequisites:
# - .env file must exist at ~/repo/watch-party/.env on Mac Mini
# - Docker Desktop must be running on Mac Mini
# - Images must be built and pushed to gitea.home.arpa registry
# Run: ansible-playbook ansible/playbooks/deploy-watch-party.yaml -i ansible/inventory.yaml
# Description: Deploys the watch-party application on Mac Mini by pulling from Gitea and starting containers via Docker Compose.
- name: Deploy Watch Party on Mac Mini
hosts: mac-mini
gather_facts: true
roles:
- watch-party
- watch-party

View File

@ -1,10 +1,6 @@
---
# Run: ansible-playbook ansible/playbooks/join-debian-agent.yaml -K -e "k3s_node_token=$K3S_NODE_TOKEN"
# Requires: K3S_NODE_TOKEN in .env
#
# What this does:
# - Joins Debian as a K3s agent node
# - Labels it as node-role=storage
# Run: ansible-playbook ansible/playbooks/join-debian-agent.yaml -i ansible/inventory.yaml -K -e "k3s_node_token=$K3S_NODE_TOKEN"
# Description: Joins the Debian machine as a K3s agent node and labels it as storage.
- name: Join Debian as K3s agent
hosts: debian
@ -12,4 +8,4 @@
gather_facts: true
roles:
- k3s-agent
- k3s-agent

View File

@ -1,9 +1,6 @@
---
# Run: ansible-playbook ansible/playbooks/setup-gitea-runner.yaml
#
# What this does:
# - Installs act_runner as a systemd service on Minisforum
# - Registers runner with Gitea
# Run: ansible-playbook ansible/playbooks/setup-gitea-runner.yaml -i ansible/inventory.yaml
# Description: Installs and registers the Gitea Actions runner as a systemd service on Minisforum.
- name: Deploy Gitea Actions Runner on Minisforum
hosts: minisforum
@ -12,4 +9,4 @@
gitea_runner_token: "{{ lookup('env', 'GITEA_RUNNER_TOKEN') }}"
roles:
- gitea-runner
- gitea-runner

View File

@ -1,13 +1,10 @@
---
# Run: ansible-playbook ansible/playbooks/setup-glances-debian.yaml
#
# What this does:
# - Deploys Glances on Debian as a Docker container
# - Accessible at http://192.168.7.183:61208
# Run: ansible-playbook ansible/playbooks/setup-glances-debian.yaml -i ansible/inventory.yaml
# Description: Deploys Glances as a Docker container on the Debian host for system monitoring.
- name: Deploy Glances on Debian
hosts: debian
gather_facts: true
roles:
- glances
- glances

View File

@ -1,21 +1,6 @@
---
# Run: ansible-playbook ansible/playbooks/setup-k3s.yaml -K
#
# What this does:
# - Installs K3s in server mode (with Traefik disabled)
# - Installs Helm
# - Fetches kubeconfig to ~/.kube/config on your workstation
# - Labels the node as node-role=primary
#
# After this playbook:
# kubectl get nodes # should show minisforum as Ready
#
# Then deploy Traefik:
# helm repo add traefik https://helm.traefik.io/traefik
# helm repo update
# helm upgrade --install traefik traefik/traefik \
# --namespace traefik --create-namespace \
# -f values/traefik.yaml
# Run: ansible-playbook ansible/playbooks/setup-k3s.yaml -i ansible/inventory.yaml -K
# Description: Installs K3s server, Helm, fetches kubeconfig, and labels the Minisforum node as primary.
- name: Install K3s server
hosts: minisforum

View File

@ -1,4 +1,8 @@
---
# Run: ansible-playbook ansible/playbooks/setup-monitoring.yaml -i ansible/inventory.yaml
# Description: Creates monitoring data directories for Prometheus and Loki on Minisforum.
- name: Prepare monitoring storage on Minisforum
hosts: minisforum
roles:
- monitoring
- monitoring

View File

@ -1,9 +1,6 @@
---
# Run: ansible-playbook ansible/playbooks/setup-nfs-debian.yaml -K
#
# What this does:
# - Installs NFS server on Debian
# - Exports /mnt/storage to Minisforum (read-only)
# Run: ansible-playbook ansible/playbooks/setup-nfs-debian.yaml -i ansible/inventory.yaml -K
# Description: Installs and configures NFS server on Debian, exporting /mnt/storage to Minisforum.
- name: Set up NFS server on Debian
hosts: debian
@ -11,4 +8,4 @@
gather_facts: true
roles:
- nfs-server
- nfs-server

View File

@ -1,4 +1,8 @@
---
# Part of role: common
# Called by: ansible/playbooks/bootstrap-minisforum.yaml
# Description: Default variables for the common role including user, packages, firewall ports, and data directories.
username: nik
timezone: Asia/Tokyo
@ -12,7 +16,7 @@ base_packages:
- ca-certificates
- gnupg
- lsb-release
- nfs-common # needed in Phase 4 for Jellyfin NFS mount from Debian
- nfs-common
ufw_allowed_ports:
- { port: 430, proto: tcp, comment: SSH }

View File

@ -1,4 +1,8 @@
---
# Part of role: common
# Called by: ansible/playbooks/bootstrap-minisforum.yaml
# Description: Handlers for the common role, including SSH service restart.
- name: Restart sshd
ansible.builtin.service:
name: sshd

View File

@ -1,4 +1,8 @@
---
# Part of role: common
# Called by: ansible/playbooks/bootstrap-minisforum.yaml
# Description: Sets timezone, installs base packages, creates user, hardens SSH, configures UFW, and creates data directories.
- name: Set timezone
community.general.timezone:
name: "{{ timezone }}"

View File

@ -1,6 +1,10 @@
---
# Part of role: gitea-runner
# Called by: ansible/playbooks/setup-gitea-runner.yaml
# Description: Handlers for the gitea-runner role, including service restart.
- name: Restart act_runner
ansible.builtin.systemd:
name: act_runner
state: restarted
become: true
become: true

View File

@ -1,4 +1,8 @@
---
# Part of role: gitea-runner
# Called by: ansible/playbooks/setup-gitea-runner.yaml
# Description: Downloads, configures, and registers act_runner as a systemd service connected to the Gitea instance.
- name: Download act_runner binary
ansible.builtin.get_url:
url: https://gitea.com/gitea/act_runner/releases/download/v0.2.11/act_runner-0.2.11-linux-amd64
@ -101,4 +105,4 @@
enabled: true
state: started
daemon_reload: true
become: true
become: true

View File

@ -1,6 +1,10 @@
---
# Part of role: glances
# Called by: ansible/playbooks/setup-glances-debian.yaml
# Description: Handlers for the glances role, including container restart.
- name: Restart Glances container
community.docker.docker_container:
name: glances
state: started
restart: true
restart: true

View File

@ -1,4 +1,8 @@
---
# Part of role: glances
# Called by: ansible/playbooks/setup-glances-debian.yaml
# Description: Deploys Glances as a Docker container with host networking for system monitoring.
- name: Create Glances config directory
ansible.builtin.file:
path: /etc/glances
@ -32,4 +36,4 @@
- /proc:/proc:ro
- /sys:/sys:ro
- /mnt:/mnt:ro
- /etc/glances:/etc/glances:ro
- /etc/glances:/etc/glances:ro

View File

@ -1,4 +1,8 @@
---
# Part of role: k3s-agent
# Called by: ansible/playbooks/join-debian-agent.yaml
# Description: Default variables for the k3s-agent role including version, server URL, and join token.
k3s_version: v1.32.2+k3s1
k3s_server_url: https://192.168.7.77:6443
k3s_node_token: "" # pass via -e or vault
k3s_node_token: ""

View File

@ -1,4 +1,8 @@
---
# Part of role: k3s-agent
# Called by: ansible/playbooks/join-debian-agent.yaml
# Description: Installs K3s in agent mode, joins the cluster, and labels the node as storage.
- name: Download and install K3s agent
ansible.builtin.shell:
cmd: >
@ -23,4 +27,4 @@
node-role=storage --overwrite
delegate_to: minisforum
become: true
changed_when: false
changed_when: false

View File

@ -1,11 +1,14 @@
---
k3s_version: v1.32.2+k3s1 # pin to a specific version; update deliberately
# Part of role: k3s-server
# Called by: ansible/playbooks/setup-k3s.yaml
# Description: Default variables for the k3s-server role including version, IP, and server configuration.
k3s_version: v1.32.2+k3s1
k3s_server_ip: 192.168.7.77
# Written to /etc/rancher/k3s/config.yaml on the server
k3s_server_config:
disable:
- traefik # we deploy Traefik ourselves via Helm
- traefik
flannel-backend: vxlan
node-ip: "{{ k3s_server_ip }}"
tls-san:

View File

@ -1,4 +1,8 @@
---
# Part of role: k3s-server
# Called by: ansible/playbooks/setup-k3s.yaml
# Description: Installs K3s server, fetches kubeconfig, installs Helm, and labels the node as primary.
- name: Create K3s config directory
ansible.builtin.file:
path: /etc/rancher/k3s
@ -17,7 +21,7 @@
curl -sfL https://get.k3s.io |
INSTALL_K3S_VERSION={{ k3s_version }}
sh -
creates: /usr/local/bin/k3s # skip if already installed
creates: /usr/local/bin/k3s
- name: Wait for K3s to be ready
ansible.builtin.wait_for:
@ -39,7 +43,7 @@
ansible.builtin.set_fact:
k3s_node_token: "{{ k3s_token_raw['content'] | b64decode | trim }}"
- name: Print node token (needed for Phase 2 agent join)
- name: Print node token
ansible.builtin.debug:
msg: "K3s node token: {{ k3s_node_token }}"
@ -65,4 +69,4 @@
- name: Label server node as primary
ansible.builtin.shell:
cmd: k3s kubectl label node minisforum node-role=primary --overwrite
changed_when: false # label is idempotent but shell module always reports changed
changed_when: false

View File

@ -1,11 +1,16 @@
---
# Part of role: monitoring
# Called by: ansible/playbooks/setup-monitoring.yaml
# Description: Creates data directories with correct ownership for Prometheus and Loki.
- name: Create monitoring data directories
file:
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
owner: "{{ item.owner }}"
group: "{{ item.owner }}"
mode: "0755"
loop:
- { path: /data/prometheus, owner: "65534" } # nobody — Prometheus UID
- { path: /data/loki, owner: "10001" } # Loki UID
become: true
- { path: /data/prometheus, owner: "65534" }
- { path: /data/loki, owner: "10001" }
become: true

View File

@ -1,3 +1,7 @@
---
# Part of role: nfs-server
# Called by: ansible/playbooks/setup-nfs-debian.yaml
# Description: Default variables for the nfs-server role including export path and allowed client IP.
nfs_export_path: /mnt/storage
nfs_allowed_ip: 192.168.7.77
nfs_allowed_ip: 192.168.7.77

View File

@ -1,6 +1,10 @@
---
# Part of role: nfs-server
# Called by: ansible/playbooks/setup-nfs-debian.yaml
# Description: Handlers for the nfs-server role, including NFS service restart.
- name: Restart NFS server
ansible.builtin.service:
name: nfs-kernel-server
state: restarted
become: true
become: true

View File

@ -1,4 +1,8 @@
---
# Part of role: nfs-server
# Called by: ansible/playbooks/setup-nfs-debian.yaml
# Description: Installs NFS server, configures exports, and ensures the backup directory exists.
- name: Install NFS server
ansible.builtin.apt:
name:
@ -32,4 +36,4 @@
owner: "1001"
group: "1001"
mode: "0755"
become: true
become: true

View File

@ -1,5 +1,7 @@
# Part of role: nfs-server
# Description: NFS exports template rendered to /etc/exports
# /etc/exports - managed by Ansible
# NFS exports for K3s cluster
/mnt/storage 192.168.7.77(ro,sync,no_subtree_check,no_root_squash,fsid=1)
/home/nik/backups 192.168.7.77(rw,sync,no_subtree_check,no_root_squash,fsid=2)
/home/nik/backups 192.168.7.77(rw,sync,no_subtree_check,no_root_squash,fsid=2)

View File

@ -1,3 +1,7 @@
---
# Part of role: watch-party
# Called by: ansible/playbooks/deploy-watch-party.yaml
# Description: Default variables for the watch-party role including repo URL and local directory.
watch_party_repo: https://gitea.home.arpa/nik/watch-party.git
watch_party_dir: /Users/nik/repo/watch-party
watch_party_dir: /Users/nik/repo/watch-party

View File

@ -1,4 +1,8 @@
---
# Part of role: watch-party
# Called by: ansible/playbooks/deploy-watch-party.yaml
# Description: Pulls the latest watch-party code from Gitea and deploys it via Docker Compose.
- name: Pull latest watch-party from Gitea
ansible.builtin.git:
repo: "{{ watch_party_repo }}"
@ -24,4 +28,4 @@
project_src: "{{ watch_party_dir }}"
state: present
pull: always
become: false
become: false

View File

@ -1,3 +1,6 @@
# Config for: Dashy dashboard
# Applied by: manifests/apply-dashy-config.sh (creates ConfigMap from this file)
pageInfo:
title: Good morning, Nik
description: How's your day going?
@ -33,7 +36,7 @@ appConfig:
backdrop-filter: blur(12px);
}
/* Item tiles feel less “boxed” */
/* Item tiles feel less "boxed" */
.item {
border-radius: 14px !important;
}
@ -217,4 +220,4 @@ sections:
itemSize: large
cutToHeight: true
rows: 1
cols: 1
cols: 1

View File

@ -1,12 +0,0 @@
#!/bin/bash
# Usage: bash manifests/apply-dashy-config.sh
# Updates Dashy config from config/dashy/conf.yml
set -e
kubectl create configmap dashy-config \
--from-file=conf.yml=config/dashy/conf.yml \
--namespace dashy \
--dry-run=client -o yaml | kubectl apply -f -
kubectl rollout restart deployment/dashy -n dashy
echo "Dashy config updated"

View File

@ -1,6 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash
# Usage: bash manifests/authentik/authentik-gitea-secret.sh
# Description: Creates the Authentik OAuth secret for Gitea integration
set -euo pipefail
source "$(dirname "$0")/../.env"
source "$(dirname "$0")/../../.env"
kubectl create secret generic authentik-gitea-oauth \
--namespace gitea \

View File

@ -1,6 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash
# Usage: bash manifests/authentik/authentik-grafana-secret.sh
# Description: Creates the Authentik OAuth secret for Grafana integration
set -euo pipefail
source "$(dirname "$0")/../.env"
source "$(dirname "$0")/../../.env"
kubectl create secret generic authentik-grafana-oauth \
--namespace monitoring \

View File

@ -1,3 +1,6 @@
# Apply: kubectl apply -f manifests/authentik/authentik-ingress.yaml
# Delete: kubectl delete -f manifests/authentik/authentik-ingress.yaml
# Description: TLS certificate and Traefik IngressRoute for Authentik at auth.home.arpa.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:

View File

@ -1,3 +1,6 @@
# Apply: kubectl apply -f manifests/authentik/authentik-middleware.yaml
# Delete: kubectl delete -f manifests/authentik/authentik-middleware.yaml
# Description: Traefik forwardAuth middleware for Authentik and LAN bypass IP allowlist.
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
@ -28,4 +31,4 @@ metadata:
spec:
ipAllowList:
sourceRange:
- 192.168.7.0/24
- 192.168.7.0/24

View File

@ -1,3 +1,6 @@
# Apply: kubectl apply -f manifests/authentik/authentik-proxy-outpost.yaml
# Delete: kubectl delete -f manifests/authentik/authentik-proxy-outpost.yaml
# Description: Authentik proxy outpost deployment and service for forward-auth integration.
apiVersion: v1
kind: ServiceAccount
metadata:
@ -50,4 +53,4 @@ spec:
ports:
- name: http
port: 9000
targetPort: 9000
targetPort: 9000

View File

@ -1,6 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash
# Usage: bash manifests/authentik/authentik-proxy-secret.sh
# Description: Creates the Authentik proxy outpost API token secret
set -euo pipefail
source "$(dirname "$0")/../.env"
source "$(dirname "$0")/../../.env"
kubectl create secret generic authentik-proxy-token \
--namespace authentik \

View File

@ -1,5 +1,6 @@
# authentik public ingress
# Apply: kubectl apply -f manifests/authentik-public-ingress.yaml
# Apply: kubectl apply -f manifests/authentik/authentik-public-ingress.yaml
# Delete: kubectl delete -f manifests/authentik/authentik-public-ingress.yaml
# Description: Public TLS certificate and Traefik IngressRoute for Authentik at auth.nik4nao.com.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
@ -28,4 +29,4 @@ spec:
- name: authentik-server
port: 80
tls:
secretName: authentik-public-tls
secretName: authentik-public-tls

View File

@ -1,5 +1,7 @@
#!/bin/bash
# Run once to create the Authentik secret. Safe to re-run (dry-run + apply).
#!/usr/bin/env bash
# Usage: bash manifests/authentik/authentik-secret.sh
# Description: Creates the Authentik secret-key and PostgreSQL password (safe to re-run)
set -euo pipefail
kubectl create secret generic authentik-secrets \
--namespace authentik \
--from-literal=secret-key="$(openssl rand -base64 50)" \

View File

@ -1,5 +1,6 @@
# Internal CA for *.home.arpa
# Apply: kubectl apply -f manifests/cert-manager/cluster-issuer-internal.yaml
# Delete: kubectl delete -f manifests/cert-manager/cluster-issuer-internal.yaml
# Description: Internal CA ClusterIssuers and root certificate for *.home.arpa TLS.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
@ -30,4 +31,4 @@ metadata:
name: internal-ca-issuer
spec:
ca:
secretName: internal-ca-cert
secretName: internal-ca-cert

View File

@ -1,5 +1,6 @@
# Let's Encrypt HTTP-01 issuer for *.nik4nao.com
# Apply: kubectl apply -f manifests/cert-manager/cluster-issuer-letsencrypt.yaml
# Delete: kubectl delete -f manifests/cert-manager/cluster-issuer-letsencrypt.yaml
# Description: Let's Encrypt production and staging ClusterIssuers with HTTP-01 via Traefik.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
@ -28,4 +29,4 @@ spec:
solvers:
- http01:
ingress:
ingressClassName: traefik
ingressClassName: traefik

View File

@ -1,8 +1,7 @@
#!/bin/bash
#!/usr/bin/env bash
# Usage: bash manifests/cert-manager/porkbun-secret.sh
# Requires: .env file in repo root with PORKBUN_API_KEY and PORKBUN_SECRET_KEY
set -e
# Description: Creates the Porkbun API credentials secret for cert-manager DNS01 challenges
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="$SCRIPT_DIR/../../.env"

View File

@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Usage: bash manifests/core/apply-dashy-config.sh
# Description: Updates the Dashy ConfigMap from config/dashy/conf.yaml and restarts the deployment
set -euo pipefail
kubectl create configmap dashy-config \
--from-file=conf.yml=config/dashy/conf.yaml \
--namespace dashy \
--dry-run=client -o yaml | kubectl apply -f -
kubectl rollout restart deployment/dashy -n dashy
echo "Dashy config updated"

View File

@ -1,16 +1,14 @@
# ca-installer.yaml
# CA Trust Installer — serves CA cert + iOS mobileconfig at ca.home.arpa
# Apply: kubectl apply -f manifests/core/ca-installer/ca-installer.yaml
# Delete: kubectl delete -f manifests/core/ca-installer/ca-installer.yaml
# Description: Nginx-based CA certificate installer serving ca.crt and iOS mobileconfig at ca.home.arpa.
#
# Pre-requisites (run once, or after CA cert rotation):
# kubectl create configmap ca-installer-web -n ca-installer \
# --from-file=index.html=manifests/ca-installer/web/index.html
# --from-file=index.html=manifests/core/ca-installer/web/index.html
#
# kubectl create configmap ca-installer-files -n ca-installer \
# --from-file=ca.crt=/tmp/homelab-ca.crt \
# --from-file=ca.mobileconfig=/tmp/homelab-ca.mobileconfig
#
# Apply: kubectl apply -f manifests/ca-installer/ca-installer.yaml
---
apiVersion: v1
kind: Namespace
metadata:
@ -28,13 +26,11 @@ data:
server_name ca.home.arpa;
root /usr/share/nginx/html;
# CA cert — must be application/x-x509-ca-cert for iOS to recognise it
location = /ca.crt {
default_type application/x-x509-ca-cert;
try_files /ca.crt =404;
}
# iOS mobileconfig — must be this exact MIME type
location = /ca.mobileconfig {
default_type application/x-apple-aspen-config;
try_files /ca.mobileconfig =404;
@ -112,8 +108,6 @@ metadata:
name: ca-installer
namespace: ca-installer
annotations:
# No TLS — this page is how you GET the CA, serving over HTTP avoids
# the chicken-and-egg problem. Once CA is trusted, *.home.arpa is fine.
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
ingressClassName: traefik
@ -127,4 +121,4 @@ spec:
service:
name: ca-installer
port:
number: 80
number: 80

View File

@ -1,3 +1,5 @@
<!-- Served by: manifests/core/ca-installer/ca-installer.yaml -->
<!-- Description: Landing page for internal CA certificate download -->
<!DOCTYPE html>
<html lang="en">
<head>

View File

@ -1,5 +1,6 @@
# Dashy — homelab dashboard
# Apply: kubectl apply -f manifests/dashy.yaml
# Apply: kubectl apply -f manifests/core/dashy.yaml
# Delete: kubectl delete -f manifests/core/dashy.yaml
# Description: Dashy homelab dashboard with Ingress at dashy.home.arpa.
apiVersion: v1
kind: Namespace
metadata:
@ -12,7 +13,6 @@ metadata:
namespace: dashy
data:
conf.yml: |
# contents will be replaced by kustomize or kubectl apply
---
apiVersion: apps/v1
kind: Deployment
@ -89,4 +89,4 @@ spec:
service:
name: dashy
port:
number: 80
number: 80

View File

@ -1,5 +1,6 @@
# Glances — system monitoring
# Apply: kubectl apply -f manifests/glances.yaml
# Apply: kubectl apply -f manifests/core/glances.yaml
# Delete: kubectl delete -f manifests/core/glances.yaml
# Description: Glances system monitoring DaemonSet with Ingress at glances.home.arpa.
apiVersion: v1
kind: Namespace
metadata:
@ -91,4 +92,4 @@ spec:
service:
name: glances
port:
number: 61208
number: 61208

View File

@ -1,4 +1,6 @@
---
# Apply: kubectl apply -f manifests/gitea/gitea-backup.yaml
# Delete: kubectl delete -f manifests/gitea/gitea-backup.yaml
# Description: CronJob that backs up Gitea to NFS every 7 days, with RBAC and PV/PVC.
apiVersion: v1
kind: ServiceAccount
metadata:

View File

@ -1,3 +1,6 @@
# Apply: kubectl apply -f manifests/gitea/gitea-pv.yaml
# Delete: kubectl delete -f manifests/gitea/gitea-pv.yaml
# Description: PersistentVolume for Gitea data on the minisforum node.
apiVersion: v1
kind: PersistentVolume
metadata:
@ -20,4 +23,4 @@ spec:
- key: kubernetes.io/hostname
operator: In
values:
- minisforum
- minisforum

View File

@ -1,9 +1,9 @@
#!/bin/bash
# Usage: bash manifests/gitea-runner-secret.sh
# Creates gitea-runner-secret from .env
set -e
#!/usr/bin/env bash
# Usage: bash manifests/gitea/gitea-runner-secret.sh
# Description: Creates the Gitea runner registration token secret
set -euo pipefail
source "$(dirname "$0")/../.env"
source "$(dirname "$0")/../../.env"
kubectl create secret generic gitea-runner-secret \
--namespace gitea-runner \

View File

@ -1,5 +1,6 @@
# Gitea Actions Runner
# Apply: kubectl apply -f manifests/gitea-runner.yaml
# Apply: kubectl apply -f manifests/gitea/gitea-runner.yaml
# Delete: kubectl delete -f manifests/gitea/gitea-runner.yaml
# Description: Gitea Actions runner deployment with host Docker socket and internal CA trust.
apiVersion: v1
kind: Namespace
metadata:

View File

@ -1,5 +1,6 @@
# JDownloader + jd-bridge
# Apply: kubectl apply -f manifests/jdownloader.yaml
# Apply: kubectl apply -f manifests/media/jdownloader.yaml
# Delete: kubectl delete -f manifests/media/jdownloader.yaml
# Description: JDownloader deployment with Ingress at jdownloader.home.arpa.
apiVersion: apps/v1
kind: Deployment
metadata:
@ -82,4 +83,4 @@ spec:
service:
name: jdownloader
port:
number: 80
number: 80

View File

@ -1,11 +1,11 @@
# Jellyfin — media server
# Apply: kubectl apply -f manifests/jellyfin.yaml
# Apply: kubectl apply -f manifests/media/jellyfin.yaml
# Delete: kubectl delete -f manifests/media/jellyfin.yaml
# Description: Jellyfin media server with NFS media PV, local config PVC, and Ingress at jellyfin.home.arpa.
apiVersion: v1
kind: Namespace
metadata:
name: jellyfin
---
# PV for media — NFS mount from Debian
apiVersion: v1
kind: PersistentVolume
metadata:
@ -28,7 +28,6 @@ spec:
values:
- minisforum
---
# PVC for media
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@ -45,7 +44,6 @@ spec:
requests:
storage: 10Ti
---
# PVC for Jellyfin config — local storage on Minisforum
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
@ -154,4 +152,4 @@ spec:
service:
name: jellyfin
port:
number: 80
number: 80

View File

@ -1,5 +1,6 @@
# qBittorrent
# Apply: kubectl apply -f manifests/qbittorrent.yaml
# Apply: kubectl apply -f manifests/media/qbittorrent.yaml
# Delete: kubectl delete -f manifests/media/qbittorrent.yaml
# Description: qBittorrent deployment with Ingress at qbittorrent.home.arpa.
apiVersion: v1
kind: Namespace
metadata:
@ -102,4 +103,4 @@ spec:
service:
name: qbittorrent
port:
number: 80
number: 80

View File

@ -1,5 +1,6 @@
# Grafana Loki datasource configuration for Grafana in the monitoring namespace
# Apply: kubectl apply -f manifests/grafana-loki-datasource.yaml
# Apply: kubectl apply -f manifests/monitoring/grafana-loki-datasource.yaml
# Delete: kubectl delete -f manifests/monitoring/grafana-loki-datasource.yaml
# Description: ConfigMap that provisions Loki as a Grafana datasource in the monitoring namespace.
apiVersion: v1
kind: ConfigMap
metadata:
@ -17,4 +18,4 @@ data:
url: http://loki-stack.monitoring.svc.cluster.local:3100
isDefault: false
version: 1
editable: true
editable: true

View File

@ -1,6 +1,8 @@
#!/bin/bash
#!/usr/bin/env bash
# Usage: bash manifests/monitoring/grafana-secret.sh
# Description: Creates the Grafana admin credentials secret
set -euo pipefail
source "$(dirname "$0")/../.env"
source "$(dirname "$0")/../../.env"
kubectl create secret generic grafana-admin-secret \
--namespace monitoring \

View File

@ -1,3 +1,6 @@
# Apply: kubectl apply -f manifests/monitoring/monitoring-pvs.yaml
# Delete: kubectl delete -f manifests/monitoring/monitoring-pvs.yaml
# Description: PersistentVolumes for Prometheus, Grafana, and Loki data directories.
apiVersion: v1
kind: PersistentVolume
metadata:
@ -38,4 +41,4 @@ spec:
persistentVolumeReclaimPolicy: Retain
storageClassName: ""
hostPath:
path: /data/loki
path: /data/loki

View File

@ -1,6 +1,6 @@
# DDNS CronJob — updates home.nik4nao.com on Porkbun every 5 minutes
# Requires: porkbun-ddns secret in ddns namespace
# Apply: kubectl apply -f manifests/ddns-cronjob.yaml
# Apply: kubectl apply -f manifests/network/ddns-cronjob.yaml
# Delete: kubectl delete -f manifests/network/ddns-cronjob.yaml
# Description: CronJob that updates home.nik4nao.com DNS on Porkbun every 5 minutes.
apiVersion: v1
kind: Namespace
metadata:
@ -93,7 +93,6 @@ spec:
--arg fqdn "$RECORD_FQDN" \
'.records[] | select(.type=="A" and .name==$fqdn) | .id' | head -1)"
# Skip if already correct (single record, correct IP)
if [ "$RECORD_COUNT" -le 1 ] && [ "$WAN_IP" = "$DNS_IP" ]; then
echo "[$(timestamp)] No change, skipping."
@ -121,4 +120,4 @@ spec:
echo "[$(timestamp)] Response: $(echo "$RESP" | jq -c .)"
echo "$RESP" | grep -q '"status":"SUCCESS"' && \
echo "[$(timestamp)] Update successful" || \
echo "[$(timestamp)] Update failed"
echo "[$(timestamp)] Update failed"

View File

@ -1,9 +1,10 @@
#!/bin/bash
# Usage: bash manifests/ddns-secret.sh
set -e
#!/usr/bin/env bash
# Usage: bash manifests/network/ddns-secret.sh
# Description: Creates the Porkbun DDNS API credentials secret
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ENV_FILE="$SCRIPT_DIR/../.env"
ENV_FILE="$SCRIPT_DIR/../../.env"
if [ ! -f "$ENV_FILE" ]; then
echo "Error: .env file not found"

View File

@ -1,5 +1,6 @@
# Glances — Debian node, proxied via Traefik
# Apply: kubectl apply -f manifests/glances-debian-ingress.yaml
# Apply: kubectl apply -f manifests/network/glances-debian-ingress.yaml
# Delete: kubectl delete -f manifests/network/glances-debian-ingress.yaml
# Description: External Endpoints, Service, and Ingress to proxy Glances on the Debian node via Traefik.
apiVersion: v1
kind: Endpoints
metadata:
@ -46,4 +47,4 @@ spec:
service:
name: glances-debian
port:
number: 61208
number: 61208

View File

@ -1,7 +1,7 @@
#!/bin/bash
# Usage: bash manifests/pihole-debian-patch.sh
# Patches pihole-debian DNS services with externalIPs after helm upgrade
set -e
#!/usr/bin/env bash
# Usage: bash manifests/network/pihole-debian-patch.sh
# Description: Patches pihole-debian DNS services with externalIPs after helm upgrade
set -euo pipefail
kubectl patch svc pihole-debian-dns-tcp -n pihole \
-p '{"spec":{"externalIPs":["192.168.7.183"]}}'

View File

@ -1,5 +1,6 @@
# Traefik dashboard IngressRoute and TLS certificate for accessing the dashboard at https://traefik.home.arpa. The dashboard is protected by the authentik authentication middleware, with a bypass for LAN clients.
# Apply: kubectl apply -f manifests/traefik-dashboard-ingress.yaml
# Apply: kubectl apply -f manifests/network/traefik-dashboard-ingress.yaml
# Delete: kubectl delete -f manifests/network/traefik-dashboard-ingress.yaml
# Description: Traefik dashboard IngressRoute with Authentik auth, root redirect, and TLS certificate.
apiVersion: traefik.io/v1alpha1
kind: IngressRoute
metadata:
@ -64,4 +65,4 @@ spec:
name: internal-ca
kind: ClusterIssuer
dnsNames:
- traefik.home.arpa
- traefik.home.arpa

View File

@ -1,5 +1,6 @@
# Watch Party — external service on Mac Mini
# Apply: kubectl apply -f manifests/watch-party-ingress.yaml
# Apply: kubectl apply -f manifests/network/watch-party-ingress.yaml
# Delete: kubectl delete -f manifests/network/watch-party-ingress.yaml
# Description: External Endpoints, Service, and Ingress for Watch Party on Mac Mini at watch-party.nik4nao.com.
apiVersion: v1
kind: Endpoints
metadata:
@ -46,4 +47,4 @@ spec:
service:
name: watch-party-mac-mini
port:
number: 3000
number: 3000

View File

@ -1,10 +1,5 @@
# authentik Helm values
# Deploy:
# helm upgrade --install authentik authentik/authentik \
# --namespace authentik \
# --version 2026.2.1 \
# --values values/authentik.yaml \
# --wait --timeout 5m
# Apply: helm upgrade --install authentik authentik/authentik -f values/authentik.yaml -n authentik --create-namespace
# Description: Helm values for Authentik SSO/identity provider
authentik:
secret_key: "" # kept blank — comes from existingSecret via env below

View File

@ -1,10 +1,5 @@
# cert-manager Helm values
# Deploy:
# helm repo add jetstack https://charts.jetstack.io
# helm repo update
# helm upgrade --install cert-manager jetstack/cert-manager \
# --namespace cert-manager --create-namespace \
# -f values/cert-manager.yaml
# Apply: helm upgrade --install cert-manager jetstack/cert-manager -f values/cert-manager.yaml -n cert-manager --create-namespace
# Description: Helm values for cert-manager TLS certificate automation
crds:
enabled: true

View File

@ -1,8 +1,5 @@
# Gitea Helm values
# Deploy:
# helm upgrade --install gitea gitea-charts/gitea \
# --namespace gitea --create-namespace \
# -f values/gitea.yaml
# Apply: helm upgrade --install gitea gitea-charts/gitea -f values/gitea.yaml -n gitea --create-namespace
# Description: Helm values for Gitea git server and Docker registry
replicaCount: 1

View File

@ -1,9 +1,5 @@
# kube-prometheus-stack
# Chart: 82.10.2 / App: v0.89.0
#
# helm upgrade --install kube-prometheus-stack prometheus-community/kube-prometheus-stack \
# --namespace monitoring --create-namespace \
# -f values/kube-prometheus-stack.yaml
# Apply: helm upgrade --install kube-prometheus-stack prometheus-community/kube-prometheus-stack -f values/kube-prometheus-stack.yaml -n monitoring --create-namespace
# Description: Helm values for Prometheus, Grafana, and Alertmanager monitoring stack
grafana:
admin:

View File

@ -1,9 +1,5 @@
# loki-stack (Loki + Promtail)
# Chart: 2.10.3 / App: v2.9.3
#
# helm upgrade --install loki-stack grafana/loki-stack \
# --namespace monitoring --create-namespace \
# -f values/loki-stack.yaml
# Apply: helm upgrade --install loki-stack grafana/loki-stack -f values/loki-stack.yaml -n monitoring --create-namespace
# Description: Helm values for Loki log aggregation and Promtail log collector
loki:
persistence:

View File

@ -1,10 +1,5 @@
# Pihole — secondary instance on Debian node
# Pihole Helm values
# Chart: mojo2600/pihole
# Deploy:
# helm upgrade --install pihole-debian mojo2600/pihole \
# --namespace pihole \
# -f values/pihole-debian.yaml
# Apply: helm upgrade --install pihole-debian mojo2600/pihole -f values/pihole-debian.yaml -n pihole --create-namespace
# Description: Helm values for Pi-hole secondary instance on Debian node
replicaCount: 1

View File

@ -1,11 +1,5 @@
# Pihole Helm values
# Chart: mojo2600/pihole
# Deploy:
# helm repo add mojo2600 https://mojo2600.github.io/pihole-kubernetes/
# helm repo update
# helm upgrade --install pihole mojo2600/pihole \
# --namespace pihole --create-namespace \
# -f values/pihole.yaml
# Apply: helm upgrade --install pihole mojo2600/pihole -f values/pihole.yaml -n pihole --create-namespace
# Description: Helm values for Pi-hole DNS server on Minisforum
replicaCount: 1

View File

@ -1,13 +1,5 @@
# Traefik Helm values — compatible with Traefik chart v34+ (Traefik v3)
# Traefik Helm values — Phase 1
# Chart: traefik/traefik
# Deploy:
# helm repo add traefik https://helm.traefik.io/traefik
# helm repo update
# helm upgrade --install traefik traefik/traefik \
# --namespace traefik --create-namespace \
# -f values/traefik.yaml
# Minimal Traefik v3 / chart v39 compatible values
# Apply: helm upgrade --install traefik traefik/traefik -f values/traefik.yaml -n traefik --create-namespace
# Description: Helm values for Traefik v3 ingress controller with Let's Encrypt
deployment:
replicas: 1