From b0e57f71f4e6f143971902741fbdb854e5ce9073 Mon Sep 17 00:00:00 2001 From: Konstantin Veltmann <7jlpiyc9@gmail.com> Date: Sat, 3 Jan 2026 22:54:18 +0000 Subject: [PATCH] Hello, World! --- .gitignore | 2 + 00-caddy/config/Caddyfile | 49 ++++ 00-caddy/docker-compose.yaml | 25 ++ 01-headscale/config/config.yaml | 409 +++++++++++++++++++++++++++ 01-headscale/docker-compose.yaml | 20 ++ 02-copyparty/config/copyparty.conf | 15 + 02-copyparty/docker-compose.yaml | 19 ++ 03-grafana/docker-compose.yaml | 18 ++ 04-prometheus/config/prometheus.yaml | 15 + 04-prometheus/docker-compose.yaml | 14 + 05-node_exporter/docker-compose.yaml | 11 + docker-compose.yaml | 11 + 12 files changed, 608 insertions(+) create mode 100644 .gitignore create mode 100644 00-caddy/config/Caddyfile create mode 100644 00-caddy/docker-compose.yaml create mode 100644 01-headscale/config/config.yaml create mode 100644 01-headscale/docker-compose.yaml create mode 100644 02-copyparty/config/copyparty.conf create mode 100644 02-copyparty/docker-compose.yaml create mode 100644 03-grafana/docker-compose.yaml create mode 100644 04-prometheus/config/prometheus.yaml create mode 100644 04-prometheus/docker-compose.yaml create mode 100644 05-node_exporter/docker-compose.yaml create mode 100644 docker-compose.yaml diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0c54f60 --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +data/ +.env diff --git a/00-caddy/config/Caddyfile b/00-caddy/config/Caddyfile new file mode 100644 index 0000000..0637614 --- /dev/null +++ b/00-caddy/config/Caddyfile @@ -0,0 +1,49 @@ +#TODO: find out how to make the domain a variable + +{ + metrics { + per_host + } +} + +# Hosted locally +vpn.veltko.de { + reverse_proxy * http://headscale:8080 +} + +copyparty.veltko.de { + reverse_proxy * http://copyparty:3923 +} + +grafana.veltko.de { + reverse_proxy * http://grafana:3000 +} + +# Hosted on the home network, tunneled through tailscale +jellyfin.veltko.de { + reverse_proxy 
http://192.168.0.109:30013 +} + +fotos.veltko.de { + reverse_proxy http://192.168.0.109:30041 +} + +git.veltko.de { + reverse_proxy http://192.168.0.109:30008 +} + +vaultwarden.veltko.de { + tls { + client_auth { + mode require_and_verify + trust_pool file /data/custom_certs/ca.crt + } + } + + reverse_proxy https://192.168.0.109:30032 { + transport http { + #FIXME: this should really be replaced by a proper cert :( + tls_insecure_skip_verify + } + } +} diff --git a/00-caddy/docker-compose.yaml b/00-caddy/docker-compose.yaml new file mode 100644 index 0000000..f988222 --- /dev/null +++ b/00-caddy/docker-compose.yaml @@ -0,0 +1,25 @@ +services: + caddy: + image: caddy:latest + container_name: caddy + restart: always + networks: + nw-caddy: + stdin_open: true + tty: true + volumes: + - ./data:/data + - ./config:/config + - /etc/localtime:/etc/localtime:ro + ports: + - 80:80 + - 443:443 + environment: + - CADDY_ADMIN=0.0.0.0:2019 + entrypoint: /usr/bin/caddy run --adapter caddyfile --config /config/Caddyfile + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://127.0.0.1:2019/config/"] + interval: 1m + timeout: 10s + retries: 3 + start_period: 5s diff --git a/01-headscale/config/config.yaml b/01-headscale/config/config.yaml new file mode 100644 index 0000000..d69714a --- /dev/null +++ b/01-headscale/config/config.yaml @@ -0,0 +1,409 @@ +--- +# headscale will look for a configuration file named `config.yaml` (or `config.json`) in the following order: +# +# - `/etc/headscale` +# - `~/.headscale` +# - current working directory + +# The url clients will connect to. 
+# Typically this will be a domain like: +# +# https://myheadscale.example.com:443 +# +server_url: https://vpn.veltko.de + +# Address to listen to / bind to on the server +# +# For production: +listen_addr: 0.0.0.0:8080 +#listen_addr: 127.0.0.1:8080 + +# Address to listen to /metrics and /debug, you may want +# to keep this endpoint private to your internal network +metrics_listen_addr: 0.0.0.0:9090 + +# Address to listen for gRPC. +# gRPC is used for controlling a headscale server +# remotely with the CLI +# Note: Remote access _only_ works if you have +# valid certificates. +# +# For production: +# grpc_listen_addr: 0.0.0.0:50443 +#grpc_listen_addr: 127.0.0.1:50443 + +# Allow the gRPC admin interface to run in INSECURE +# mode. This is not recommended as the traffic will +# be unencrypted. Only enable if you know what you +# are doing. +grpc_allow_insecure: false + +# The Noise section includes specific configuration for the +# TS2021 Noise protocol +noise: + # The Noise private key is used to encrypt the traffic between headscale and + # Tailscale clients when using the new Noise-based protocol. A missing key + # will be automatically generated. + private_key_path: /var/lib/headscale/noise_private.key + +# List of IP prefixes to allocate tailaddresses from. +# Each prefix consists of either an IPv4 or IPv6 address, +# and the associated prefix length, delimited by a slash. +# It must be within IP ranges supported by the Tailscale +# client - i.e., subnets of 100.64.0.0/10 and fd7a:115c:a1e0::/48. +# See below: +# IPv6: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#LL81C52-L81C71 +# IPv4: https://github.com/tailscale/tailscale/blob/22ebb25e833264f58d7c3f534a8b166894a89536/net/tsaddr/tsaddr.go#L33 +# Any other range is NOT supported, and it will cause unexpected issues. 
+prefixes: + v4: 100.64.0.0/10 + v6: fd7a:115c:a1e0::/48 + + # Strategy used for allocation of IPs to nodes, available options: + # - sequential (default): assigns the next free IP from the previous given + # IP. A best-effort approach is used and Headscale might leave holes in the + # IP range or fill up existing holes in the IP range. + # - random: assigns the next free IP from a pseudo-random IP generator (crypto/rand). + allocation: sequential + +# DERP is a relay system that Tailscale uses when a direct +# connection cannot be established. +# https://tailscale.com/blog/how-tailscale-works/#encrypted-tcp-relays-derp +# +# headscale needs a list of DERP servers that can be presented +# to the clients. +derp: + server: + # If enabled, runs the embedded DERP server and merges it into the rest of the DERP config + # The Headscale server_url defined above MUST be using https, DERP requires TLS to be in place + enabled: false + + # Region ID to use for the embedded DERP server. + # The local DERP prevails if the region ID collides with other region ID coming from + # the regular DERP config. + region_id: 999 + + # Region code and name are displayed in the Tailscale UI to identify a DERP region + region_code: "headscale" + region_name: "Headscale Embedded DERP" + + # Only allow clients associated with this server access + verify_clients: true + + # Listens over UDP at the configured address for STUN connections - to help with NAT traversal. + # When the embedded DERP server is enabled stun_listen_addr MUST be defined. + # + # For more details on how this works, check this great article: https://tailscale.com/blog/how-tailscale-works/ + stun_listen_addr: "0.0.0.0:3478" + + # Private key used to encrypt the traffic between headscale DERP and + # Tailscale clients. A missing key will be automatically generated. 
+ private_key_path: /var/lib/headscale/derp_server_private.key + + # This flag can be used, so the DERP map entry for the embedded DERP server is not written automatically, + # it enables the creation of your very own DERP map entry using a locally available file with the parameter DERP.paths + # If you enable the DERP server and set this to false, it is required to add the DERP server to the DERP map using DERP.paths + automatically_add_embedded_derp_region: true + + # For better connection stability (especially when using an Exit-Node and DNS is not working), + # it is possible to optionally add the public IPv4 and IPv6 address to the Derp-Map using: + ipv4: 198.51.100.1 + ipv6: 2001:db8::1 + + # List of externally available DERP maps encoded in JSON + urls: + - https://controlplane.tailscale.com/derpmap/default + + # Locally available DERP map files encoded in YAML + # + # This option is mostly interesting for people hosting + # their own DERP servers: + # https://tailscale.com/kb/1118/custom-derp-servers/ + # + # paths: + # - /etc/headscale/derp-example.yaml + paths: [] + + # If enabled, a worker will be set up to periodically + # refresh the given sources and update the derpmap + # will be set up. + auto_update_enabled: true + + # How often should we check for DERP updates? + update_frequency: 3h + +# Disables the automatic check for headscale updates on startup +disable_check_updates: false + +# Time before an inactive ephemeral node is deleted? +ephemeral_node_inactivity_timeout: 30m + +database: + # Database type. Available options: sqlite, postgres + # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons. + # All new development, testing and optimisations are done with SQLite in mind. + type: sqlite + + # Enable debug mode. This setting requires the log.level to be set to "debug" or "trace". + debug: false + + # GORM configuration settings. + gorm: + # Enable prepared statements. 
+ prepare_stmt: true + + # Enable parameterized queries. + parameterized_queries: true + + # Skip logging "record not found" errors. + skip_err_record_not_found: true + + # Threshold for slow queries in milliseconds. + slow_threshold: 1000 + + # SQLite config + sqlite: + path: /var/lib/headscale/db.sqlite + + # Enable WAL mode for SQLite. This is recommended for production environments. + # https://www.sqlite.org/wal.html + write_ahead_log: true + + # Maximum number of WAL file frames before the WAL file is automatically checkpointed. + # https://www.sqlite.org/c3ref/wal_autocheckpoint.html + # Set to 0 to disable automatic checkpointing. + wal_autocheckpoint: 1000 + + # # Postgres config + # Please note that using Postgres is highly discouraged as it is only supported for legacy reasons. + # See database.type for more information. + # postgres: + # # If using a Unix socket to connect to Postgres, set the socket path in the 'host' field and leave 'port' blank. + # host: localhost + # port: 5432 + # name: headscale + # user: foo + # pass: bar + # max_open_conns: 10 + # max_idle_conns: 10 + # conn_max_idle_time_secs: 3600 + + # # If other 'sslmode' is required instead of 'require(true)' and 'disabled(false)', set the 'sslmode' you need + # # in the 'ssl' field. Refers to https://www.postgresql.org/docs/current/libpq-ssl.html Table 34.1. + # ssl: false + +### TLS configuration +# +## Let's encrypt / ACME +# +# headscale supports automatically requesting and setting up +# TLS for a domain with Let's Encrypt. 
+# +# URL to ACME directory +acme_url: https://acme-v02.api.letsencrypt.org/directory + +# Email to register with ACME provider +acme_email: "" + +# Domain name to request a TLS certificate for: +tls_letsencrypt_hostname: "" + +# Path to store certificates and metadata needed by +# letsencrypt +# For production: +tls_letsencrypt_cache_dir: /var/lib/headscale/cache + +# Type of ACME challenge to use, currently supported types: +# HTTP-01 or TLS-ALPN-01 +# See: docs/ref/tls.md for more information +tls_letsencrypt_challenge_type: HTTP-01 +# When HTTP-01 challenge is chosen, letsencrypt must set up a +# verification endpoint, and it will be listening on: +# :http = port 80 +tls_letsencrypt_listen: ":http" + +## Use already defined certificates: +tls_cert_path: "" +tls_key_path: "" + +log: + # Valid log levels: panic, fatal, error, warn, info, debug, trace + level: info + + # Output formatting for logs: text or json + format: json + +## Policy +# headscale supports Tailscale's ACL policies. +# Please have a look to their KB to better +# understand the concepts: https://tailscale.com/kb/1018/acls/ +policy: + # The mode can be "file" or "database" that defines + # where the ACL policies are stored and read from. + mode: file + # If the mode is set to "file", the path to a + # HuJSON file containing ACL policies. + path: "" + +## DNS +# +# headscale supports Tailscale's DNS configuration and MagicDNS. +# Please have a look to their KB to better understand the concepts: +# +# - https://tailscale.com/kb/1054/dns/ +# - https://tailscale.com/kb/1081/magicdns/ +# - https://tailscale.com/blog/2021-09-private-dns-with-magicdns/ +# +# Please note that for the DNS configuration to have any effect, +# clients must have the `--accept-dns=true` option enabled. This is the +# default for the Tailscale client. This option is enabled by default +# in the Tailscale client. 
+# +# Setting _any_ of the configuration and `--accept-dns=true` on the +# clients will integrate with the DNS manager on the client or +# overwrite /etc/resolv.conf. +# https://tailscale.com/kb/1235/resolv-conf +# +# If you want stop Headscale from managing the DNS configuration +# all the fields under `dns` should be set to empty values. +dns: + # Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/). + magic_dns: false + + # Defines the base domain to create the hostnames for MagicDNS. + # This domain _must_ be different from the server_url domain. + # `base_domain` must be a FQDN, without the trailing dot. + # The FQDN of the hosts will be + # `hostname.base_domain` (e.g., _myhost.example.com_). + base_domain: veltko.lan + + # Whether to use the local DNS settings of a node or override the local DNS + # settings (default) and force the use of Headscale's DNS configuration. + override_local_dns: false + + # List of DNS servers to expose to clients. + nameservers: + global: + - 1.1.1.1 + - 1.0.0.1 + - 2606:4700:4700::1111 + - 2606:4700:4700::1001 + + # NextDNS (see https://tailscale.com/kb/1218/nextdns/). + # "abc123" is example NextDNS ID, replace with yours. + # - https://dns.nextdns.io/abc123 + + # Split DNS (see https://tailscale.com/kb/1054/dns/), + # a map of domains and which DNS server to use for each. + split: {} + # foo.bar.com: + # - 1.1.1.1 + # darp.headscale.net: + # - 1.1.1.1 + # - 8.8.8.8 + + # Set custom DNS search domains. With MagicDNS enabled, + # your tailnet base_domain is always the first search domain. 
+ search_domains: [] + + # Extra DNS records + # so far only A and AAAA records are supported (on the tailscale side) + # See: docs/ref/dns.md + extra_records: [] + # - name: "grafana.myvpn.example.com" + # type: "A" + # value: "100.64.0.3" + # + # # you can also put it in one line + # - { name: "prometheus.myvpn.example.com", type: "A", value: "100.64.0.3" } + # + # Alternatively, extra DNS records can be loaded from a JSON file. + # Headscale processes this file on each change. + # extra_records_path: /var/lib/headscale/extra-records.json + +# Unix socket used for the CLI to connect without authentication +# Note: for production you will want to set this to something like: +unix_socket: /var/run/headscale/headscale.sock +unix_socket_permission: "0770" + +# OpenID Connect +# oidc: +# # Block startup until the identity provider is available and healthy. +# only_start_if_oidc_is_available: true +# +# # OpenID Connect Issuer URL from the identity provider +# issuer: "https://your-oidc.issuer.com/path" +# +# # Client ID from the identity provider +# client_id: "your-oidc-client-id" +# +# # Client secret generated by the identity provider +# # Note: client_secret and client_secret_path are mutually exclusive. +# client_secret: "your-oidc-client-secret" +# # Alternatively, set `client_secret_path` to read the secret from the file. +# # It resolves environment variables, making integration to systemd's +# # `LoadCredential` straightforward: +# client_secret_path: "${CREDENTIALS_DIRECTORY}/oidc_client_secret" +# +# # The amount of time a node is authenticated with OpenID until it expires +# # and needs to reauthenticate. +# # Setting the value to "0" will mean no expiry. +# expiry: 180d +# +# # Use the expiry from the token received from OpenID when the user logged +# # in. This will typically lead to frequent need to reauthenticate and should +# # only be enabled if you know what you are doing. +# # Note: enabling this will cause `oidc.expiry` to be ignored. 
+# use_expiry_from_token: false +# +# # The OIDC scopes to use, defaults to "openid", "profile" and "email". +# # Custom scopes can be configured as needed, be sure to always include the +# # required "openid" scope. +# scope: ["openid", "profile", "email"] +# +# # Provide custom key/value pairs which get sent to the identity provider's +# # authorization endpoint. +# extra_params: +# domain_hint: example.com +# +# # Only accept users whose email domain is part of the allowed_domains list. +# allowed_domains: +# - example.com +# +# # Only accept users whose email address is part of the allowed_users list. +# allowed_users: +# - alice@example.com +# +# # Only accept users which are members of at least one group in the +# # allowed_groups list. +# allowed_groups: +# - /headscale +# +# # Optional: PKCE (Proof Key for Code Exchange) configuration +# # PKCE adds an additional layer of security to the OAuth 2.0 authorization code flow +# # by preventing authorization code interception attacks +# # See https://datatracker.ietf.org/doc/html/rfc7636 +# pkce: +# # Enable or disable PKCE support (default: false) +# enabled: false +# +# # PKCE method to use: +# # - plain: Use plain code verifier +# # - S256: Use SHA256 hashed code verifier (default, recommended) +# method: S256 + +# Logtail configuration +# Logtail is Tailscales logging and auditing infrastructure, it allows the +# control panel to instruct tailscale nodes to log their activity to a remote +# server. To disable logging on the client side, please refer to: +# https://tailscale.com/kb/1011/log-mesh-traffic#opting-out-of-client-logging +logtail: + # Enable logtail for tailscale nodes of this Headscale instance. + # As there is currently no support for overriding the log server in Headscale, this is + # disabled by default. Enabling this will make your clients send logs to Tailscale Inc. 
+ enabled: false + +# Enabling this option makes devices prefer a random port for WireGuard traffic over the +# default static port 41641. This option is intended as a workaround for some buggy +# firewall devices. See https://tailscale.com/kb/1181/firewalls/ for more information. +randomize_client_port: false diff --git a/01-headscale/docker-compose.yaml b/01-headscale/docker-compose.yaml new file mode 100644 index 0000000..c647032 --- /dev/null +++ b/01-headscale/docker-compose.yaml @@ -0,0 +1,20 @@ +services: + headscale: + container_name: headscale + image: headscale/headscale:stable + restart: unless-stopped + volumes: + - ./config:/etc/headscale + - ./data:/var/lib/headscale + entrypoint: headscale serve + networks: + nw-caddy: + depends_on: + caddy: + condition: service_healthy + healthcheck: + test: ["CMD", "headscale", "health"] + interval: 5m + timeout: 10s + retries: 3 + start_period: 10s diff --git a/02-copyparty/config/copyparty.conf b/02-copyparty/config/copyparty.conf new file mode 100644 index 0000000..1574ec7 --- /dev/null +++ b/02-copyparty/config/copyparty.conf @@ -0,0 +1,15 @@ +[global] + e2dsa + e2ts + ansi + rproxy: -1 + xff-src: lan + +[accounts] + weckyy702:fP4Nsk2EVSLmDXBnNj0dG0lMsb$GcvE0K2!ja#YKv$DsGdtM#1NQO272AVd&ZkSG + +[/] + /w + accs: + rwmda: weckyy702 + g: * diff --git a/02-copyparty/docker-compose.yaml b/02-copyparty/docker-compose.yaml new file mode 100644 index 0000000..2806cf1 --- /dev/null +++ b/02-copyparty/docker-compose.yaml @@ -0,0 +1,19 @@ +services: + copyparty: + image: copyparty/ac:latest + container_name: copyparty + volumes: + - ./config:/cfg + - ./data:/w + networks: + nw-caddy: + depends_on: + caddy: + condition: service_healthy + stop_grace_period: 15s # thumbnailer is allowed to continue finishing up for 10s after the shutdown signal + healthcheck: + test: ["CMD-SHELL", "wget --spider -q 127.0.0.1:3923/?reset"] + interval: 15m + timeout: 2s + retries: 5 + start_period: 15s diff --git 
a/03-grafana/docker-compose.yaml b/03-grafana/docker-compose.yaml new file mode 100644 index 0000000..fa3a5b1 --- /dev/null +++ b/03-grafana/docker-compose.yaml @@ -0,0 +1,18 @@ +services: + grafana: + image: grafana/grafana:latest + container_name: grafana + networks: + nw-caddy: + depends_on: + caddy: + condition: service_healthy + volumes: + - ./data:/var/lib/grafana + - ./config:/etc/grafana/provisioning + user: + "1000" + env_file: + - .env + restart: unless-stopped + diff --git a/04-prometheus/config/prometheus.yaml b/04-prometheus/config/prometheus.yaml new file mode 100644 index 0000000..2d97f75 --- /dev/null +++ b/04-prometheus/config/prometheus.yaml @@ -0,0 +1,15 @@ +global: + scrape_interval: 15s +scrape_configs: + - job_name: caddy + static_configs: + - targets: ['caddy:2019'] + - job_name: prometheus + static_configs: + - targets: ["localhost:9090"] + - job_name: docker + static_configs: + - targets: ["host.docker.internal:9323"] + - job_name: host + static_configs: + - targets: ['host.docker.internal:9100'] diff --git a/04-prometheus/docker-compose.yaml b/04-prometheus/docker-compose.yaml new file mode 100644 index 0000000..26c923f --- /dev/null +++ b/04-prometheus/docker-compose.yaml @@ -0,0 +1,14 @@ +services: + prometheus: + image: prom/prometheus:latest + container_name: prometheus + networks: + nw-caddy: + extra_hosts: + - "host.docker.internal:host-gateway" + volumes: + - ./config:/etc/prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yaml' + - '--storage.tsdb.path=/prometheus' + restart: unless-stopped diff --git a/05-node_exporter/docker-compose.yaml b/05-node_exporter/docker-compose.yaml new file mode 100644 index 0000000..b19df6f --- /dev/null +++ b/05-node_exporter/docker-compose.yaml @@ -0,0 +1,11 @@ +services: + node_exporter: + image: quay.io/prometheus/node-exporter:latest + container_name: node_exporter + command: + - '--path.rootfs=/host' + network_mode: host + pid: host + restart: unless-stopped + 
volumes: + - '/:/host:ro,rslave' diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 0000000..399c33d --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,11 @@ +include: + - 00-caddy/docker-compose.yaml + - 01-headscale/docker-compose.yaml + - 02-copyparty/docker-compose.yaml + - 03-grafana/docker-compose.yaml + - 04-prometheus/docker-compose.yaml + - 05-node_exporter/docker-compose.yaml + +networks: + nw-caddy: + driver: bridge