Compare commits


17 Commits

.gitignore

@@ -1,4 +1,3 @@
-secrets/
-data/
-.redo
-*.tmp
+/secrets/
+/data/
+env.production

@@ -1 +0,0 @@
-DOMAIN=localhost

@@ -0,0 +1,5 @@
DOMAIN=localhost
SMTP_USER=admin
SMTP_HOST=localhost
SMTP_PORT=587

@@ -2,30 +2,76 @@
 Experiment in digital autonomy
-Hosted on https://git.woodbine.nyc/micro/woodbine.nyc
+Latest code is hosted on https://git.woodbine.nyc/micro/woodbine.nyc
+In general, everything is orchestrated by the compose files.
+Sometimes, you will see a -setup service in the compose file.
+This usually runs a script that checks or generates secrets, and does initial configuration if needed.
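+A minimal sketch of the pattern (service names here are illustrative; the real definitions are in the services/secrets.yaml hunk further down this diff):
+    services:
+      generate-secrets:
+        image: alpine/openssl
+        restart: "no"
+        volumes:
+          - ./secrets/generate-secrets:/generate-secrets:ro
+        entrypoint: ["/generate-secrets"]
+      some-service:
+        depends_on:
+          generate-secrets:
+            condition: service_completed_successfully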
+## Goals
+We hope this is understandable by a single individual, after learning a bit about docker compose and caddy.
+## setup
+Make a backblaze B2 account for backups. Add the secrets to ./secrets/backup/.
+Fill out env.template and make sure to pass it to the commands below (the scripts use --env-file env.production).
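+For example, the backup service defined later in this diff reads its B2 credentials from these secret files (placeholder values shown; BUCKET_NAME and PASSPHRASE can be generated by ./scripts/secrets):
+    mkdir -p secrets/backup/duplicity
+    printf '%s' 'your-key-id' > secrets/backup/duplicity/B2_APPLICATION_KEY_ID
+    printf '%s' 'your-application-key' > secrets/backup/duplicity/B2_APPLICATION_KEY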
 ## running
-docker-compose --env-file env.production \
-  --file services/caddy.yaml \
-  --file services/zitadel.yaml \
-  up --build
+We have two scripts in the `scripts/` directory - up and down
+    ./scripts/up
+To stop all the containers, you can ctrl+c, or
+    ./scripts/down
+To generate secrets for all services
+    ./scripts/secrets
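+This diff also adds a pass-through run script; for instance, to open a shell in the backup container (illustrative usage):
+    ./scripts/run backup sh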
 ## port forwarding
+The caddy service expects to be able to bind to ports 80 and 443
+One simple way is to allow unprivileged users access to these low ports
 echo 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee -a /etc/sysctl.conf
 sudo sysctl -w net.ipv4.ip_unprivileged_port_start=80
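+To confirm the setting took effect (a quick check, not part of the repo scripts):
+    sysctl net.ipv4.ip_unprivileged_port_start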
-## beta release
-- [x] caddy for homepage
-- [x] webdav for personal home pages
-- [ ] backup using duplicity uploaded to backblaze b2
-- [ ] restore using duplicity downloaded from backblaze b2
-- [ ] zitadel sso
+## alpha
+- [~] single sign-on (authelia)
+- [ ] per-user webdav folders via authelia
+- [ ] any OIDC service setup
+- [~] file backup (duplicity)
+- [ ] postgres backup (duplicity)
+- [ ] decide on single postgres instance or multiple
+- [x] reverse proxy (caddy)
+- [x] personal home pages (caddy-webdav)
+- [x] migrate from yaml to env for authelia config
+- [x] setup notifications via smtp
+## beta
+- [ ] file restore
+- [ ] postgres restore
 - [ ] wiki
-- [ ] dendrite matrix server
-- [ ] gitea
+- [ ] matrix server (dendrite)
+- [ ] mail server (stalwart or maddy)
+- [ ] mailing list (listmonk)
+- [ ] code forge (gitea or forgejo)
+## 0.1
+- [ ] only expose 443, 587, 993
+- [ ] running on betabasement-woodbine.nyc
+- [ ] audit on secrets management
+- [ ] audit on mail server
+- [ ] audit on general architecture
 ## credits

@@ -0,0 +1,7 @@
podman compose --env-file env.production \
--file services/secrets.yaml \
--file services/backup.yaml \
--file services/proxy.yaml \
--file services/auth.yaml \
--file services/web.yaml \
down --volumes

@@ -0,0 +1,4 @@
echo generating zitadel secrets; {
openssl rand -hex 16 | tr -d '\n' >! secrets/auth/zitadel/MASTER_KEY
openssl rand -hex 32 | tr -d '\n' >! secrets/auth/zitadel/STORAGE_PASSWORD
}

@@ -0,0 +1,7 @@
podman compose --env-file env.production \
--file services/secrets.yaml \
--file services/backup.yaml \
--file services/proxy.yaml \
--file services/auth.yaml \
--file services/web.yaml \
run "$@"

@@ -0,0 +1,7 @@
podman compose --env-file env.production \
--file services/secrets.yaml \
--file services/backup.yaml \
--file services/proxy.yaml \
--file services/auth.yaml \
--file services/web.yaml \
up --build

@@ -1 +1,3 @@
 Do not check in anything in this directory
+Check out ../services/secrets.yaml on how to make it easy to check that secrets are defined, or to generate secrets on start

@@ -0,0 +1,67 @@
version: "3.8"
secrets:
MASTER_KEY:
file: ../secrets/auth/zitadel/MASTER_KEY
services:
backup:
volumes:
- ../data/auth:/mnt/backup/src/auth:ro
generate-secrets:
volumes:
- ../secrets/auth/zitadel/MASTER_KEY:/secrets/auth/zitadel/MASTER_KEY
zitadel:
restart: 'unless-stopped'
image: 'ghcr.io/zitadel/zitadel:latest'
environment:
ZITADEL_DATABASE_COCKROACH_HOST: crdb
ZITADEL_EXTERNALSECURE: "true"
ZITADEL_EXTERNALDOMAIN: auth.${DOMAIN}
ZITADEL_EXTERNALPORT: 443
ZITADEL_WEBAUTHN_NAME: ${DOMAIN}
ZITADEL_FIRSTINSTANCE_ORG_NAME: basement
ZITADEL_FIRSTINSTANCE_ORG_HUMAN_USERNAME: ${ADMIN_USER}
ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORD: ${ADMIN_PASS}
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM: basement
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROMNAME: ${DOMAIN}
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_HOST: "${SMTP_HOST}:${SMTP_PORT}"
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_USER: ${SMTP_USER}
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_PASSWORD: ${SMTP_PASS}
secrets:
- MASTER_KEY
command: "start-from-init --masterkeyFile /run/secrets/MASTER_KEY --tlsMode external"
depends_on:
generate-secrets:
condition: 'service_completed_successfully'
caddy:
condition: 'service_healthy'
crdb:
condition: 'service_healthy'
ports:
- '8080:8080'
crdb:
restart: unless-stopped
image: 'cockroachdb/cockroach:latest-v23.1'
depends_on:
generate-secrets:
condition: 'service_completed_successfully'
command: "start-single-node --insecure --store=path=/cockroach/cockroach-data,size=20%"
healthcheck:
test: ["CMD", "curl", "--fail", "http://localhost:8080/health?ready=1"]
interval: '10s'
timeout: '30s'
retries: 5
start_period: '20s'
ports:
- '9090:8080'
- '26257:26257'
volumes:
- ../data/auth/crdb/data:/cockroach/cockroach-data:rw
caddy:
volumes:
- ./auth/Proxyfile:/etc/caddy.d/zitadel:ro

@@ -0,0 +1,4 @@
auth.{$DOMAIN}:443 {
reverse_proxy zitadel:8080
tls internal
}

@@ -0,0 +1,44 @@
version: "3.8"
secrets:
B2_APPLICATION_KEY:
file: ../secrets/backup/duplicity/B2_APPLICATION_KEY
B2_APPLICATION_KEY_ID:
file: ../secrets/backup/duplicity/B2_APPLICATION_KEY_ID
BUCKET_NAME:
file: ../secrets/backup/duplicity/BUCKET_NAME
PASSPHRASE:
file: ../secrets/backup/duplicity/PASSPHRASE
services:
backup:
image: tecnativa/docker-duplicity:latest
restart: unless-stopped
depends_on:
generate-secrets:
condition: 'service_completed_successfully'
secrets: [B2_APPLICATION_KEY, B2_APPLICATION_KEY_ID, BUCKET_NAME, PASSPHRASE]
environment:
HOSTNAME: ${DOMAIN}
TZ: America/New_York
volumes:
- ./backup/backup-files:/backup-files:ro
entrypoint: ["/bin/sh", "/backup-files"]
generate-secrets:
volumes:
- ../secrets/backup/duplicity/BUCKET_NAME:/secrets/backup/duplicity/BUCKET_NAME
- ../secrets/backup/duplicity/PASSPHRASE:/secrets/backup/duplicity/PASSPHRASE
# duplicity-postgres:
# image: tecnativa/docker-duplicity-postgres:latest
# restart: unless-stopped
# depends_on: [secrets]
# secrets: [B2_APPLICATION_KEY, B2_APPLICATION_KEY_ID, BUCKET_NAME, PASSPHRASE]
# environment:
# HOSTNAME: ${DOMAIN}
# TZ: America/New_York
# volumes:
# - ./backup/backup-databases:/backup-databases:ro
# entrypoint: ["/bin/sh", "/backup-databases"]

@@ -0,0 +1,14 @@
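# Build the duplicity b2:// destination and passphrase from docker secrets,
# then run one backup pass per database environment file under /backup/.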
read B2_APPLICATION_KEY_ID < /run/secrets/B2_APPLICATION_KEY_ID
read B2_APPLICATION_KEY < /run/secrets/B2_APPLICATION_KEY
read BUCKET_NAME < /run/secrets/BUCKET_NAME
export DST=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
read PASSPHRASE < /run/secrets/PASSPHRASE
export PASSPHRASE
for environment in /backup/*; do
. $environment
export PGHOST PGPASSWORD PGUSER DBS_TO_INCLUDE DBS_TO_EXCLUDE
/usr/local/bin/entrypoint
unset PGHOST PGPASSWORD PGUSER DBS_TO_INCLUDE DBS_TO_EXCLUDE
done

@@ -0,0 +1,9 @@
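# Build the duplicity b2:// destination and passphrase from docker secrets,
# then hand off to the image's stock entrypoint for the file backup.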
read B2_APPLICATION_KEY_ID < /run/secrets/B2_APPLICATION_KEY_ID
read B2_APPLICATION_KEY < /run/secrets/B2_APPLICATION_KEY
read BUCKET_NAME < /run/secrets/BUCKET_NAME
export DST=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
read PASSPHRASE < /run/secrets/PASSPHRASE
export PASSPHRASE
/usr/local/bin/entrypoint

@@ -1,14 +0,0 @@
. ../../env.production
service=$(basename $PWD)
secrets="../../secrets/$service"
read B2_APPLICATION_KEY_ID < $secrets/application-key-id
read B2_APPLICATION_KEY < $secrets/application-key
export BUCKET_NAME=${DOMAIN}-backup
export DESTINATION=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
read PASSPHRASE < $secrets/passphrase
env PASSPHRASE=$PASSPHRASE duplicity backup ../../data $DESTINATION >&2
env PASSPHRASE=$PASSPHRASE duplicity remove-older-than 28D $DESTINATION >&2

@@ -1,20 +0,0 @@
version: "3.7"
services:
caddy:
image: caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "443:443/udp"
volumes:
- ./caddy/Caddyfile:/etc/caddy/Caddyfile
- ../data/caddy/site:/site
- ../data/caddy/data:/data
- caddy_config:/config
environment:
- DOMAIN
volumes:
caddy_config:

@@ -1,9 +0,0 @@
{$DOMAIN} {
file_server {
root /site
}
}
web.{$DOMAIN} {
reverse_proxy services-web-1:4431
}

@@ -0,0 +1,57 @@
version: "3.8"
secrets:
SMTP_PASSWORD:
file: ../secrets/mail/SMTP_PASSWORD
services:
generate-secrets:
volumes:
- ../secrets/mail/maddy/SMTP_PASSWORD:/secrets/mail/maddy/SMTP_PASSWORD
backup:
volumes:
- ../data/mail:/mnt/backup/src/mail:ro
caddy:
volumes:
- ./mail/Proxyfile:/etc/caddy.d/mail:ro
maddy:
image: foxcpp/maddy:latest
secrets: [SMTP_PASSWORD]
restart: unless-stopped
depends_on:
generate-secrets:
condition: 'service_completed_successfully'
environment:
- MADDY_HOSTNAME=mx.mail.${DOMAIN}
- MADDY_DOMAIN=mail.${DOMAIN}
volumes:
- ../data/mail/maddy:/data
# TODO: get from caddy?
#- ../secrets/tls/fullchain.pem:/data/tls/fullchain.pem:ro
#- ../secrets/tls/privkey.pem:/data/tls/privkey.pem:ro
ports:
- 25:25
- 143:143
- 587:587
- 993:993
roundcube:
image: roundcube/roundcubemail:1.6.x-apache
environment:
ROUNDCUBEMAIL_DEFAULT_HOST: ssl://mx.mail.${DOMAIN}
ROUNDCUBEMAIL_DEFAULT_PORT: 993
ROUNDCUBEMAIL_SMTP_SERVER: tls://mx.mail.${DOMAIN}
ROUNDCUBEMAIL_SMTP_PORT: 587
ROUNDCUBEMAIL_DB_TYPE: sqlite
volumes:
- ../data/mail/roundcube/db:/var/roundcube/db
ports:
- 9002:80
check-secrets:
secrets:
- SMTP_PASSWORD

@@ -0,0 +1,4 @@
mail.{$DOMAIN} {
reverse_proxy roundcube:80
}

@@ -0,0 +1,27 @@
version: "3.8"
services:
caddy:
image: caddy
restart: unless-stopped
ports:
- "80:80"
- "443:443"
- "443:443/udp"
volumes:
- ./proxy/Caddyfile:/etc/caddy/Caddyfile
- ../data/proxy/caddy/site:/site
- ../data/proxy/caddy/data:/data
- ../data/proxy/caddy/config:/config
environment:
- DOMAIN
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost"]
interval: '10s'
timeout: '30s'
retries: 5
start_period: '20s'
backup:
volumes:
- ../data/proxy:/mnt/backup/src/proxy:ro

@@ -0,0 +1,7 @@
{$DOMAIN} {
file_server {
root /site
}
}
import /etc/caddy.d/*

@@ -16,9 +16,9 @@ we have a backup script that uses duplicity, this should be moved into a container
 caddy is the web server, and handles https certificates, and proxying to all the services.
-#### [Zitadel](https://zitadel.com/docs) **WIP**
-zitadel lets you have a single username and password to sign on to all your services.
+#### [Authelia](https://www.authelia.com/overview/prologue/introduction/) **WIP**
+authelia lets you have a single username and password to sign on to all your services.
 ### Optional Services
@@ -31,14 +31,48 @@ without having to sync anything.
-There are three things to think about when adding a service:
+There are four things to think about when adding a service:
-1. How to enable sign-in with zitadel?
-Generally, zitadel has some cli commands that we have put in scripts in the zitadel folder.
+1. How to enable sign-on?
+Look at https://www.authelia.com/integration/openid-connect/introduction/ for integration guides (see the sketch after this list).
-2. How to expose as a subdomain in caddy?
-You will want to make a Caddyfile, which will get mounted by the Caddy compose file.
+2. How to expose as a subdomain?
+Add a volume mount of your reverse proxy config to your compose file.
+    # in the services: part of your compose file
+    caddy:
+      volumes:
+        - ./some-service/Proxyfile:/etc/caddy.d/some-service
+    # Proxyfile looks something like
+    someservice.{$DOMAIN} {
+        reverse_proxy someservice:4321
+    }
 3. How will this be backed up and restored?
-We backup all files in the data/ directory, but if your service interacts with a database like postgres, will need additional work.
+For plain files, add the appropriate volume mount like so:
+    # in the services: part of your compose file
+    backup:
+      volumes:
+        - ../data/some-service:/mnt/backup/src/some-service:ro
+This will be backed up according to the plan in [the backup service](./backup.yaml)
+For postgres databases, we are still figuring out the best approach.
+4. How do we manage secrets?
+If your service requires secrets, you can use docker secrets, and have them generated on startup as follows:
+    # in the services: part of your compose file
+    some-service:
+      depends_on:
+        - secrets
+    secrets:
+      volumes:
+        - ../secrets/some-service/SECRET_TO_INITIALIZE_IF_EMPTY:/secrets/some-service/SECRET_TO_INITIALIZE_IF_EMPTY
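+As a sketch for step 1, an OIDC client registration in Authelia's configuration looks roughly like this (client id, secret, and redirect URI are hypothetical):
+    identity_providers:
+      oidc:
+        clients:
+          - id: some-service
+            description: Some Service
+            secret: <generated-client-secret>
+            authorization_policy: two_factor
+            redirect_uris:
+              - https://someservice.example.com/oauth2/callback
+            scopes: [openid, profile, email]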

@@ -0,0 +1,16 @@
version: "3.8"
services:
generate-secrets:
image: alpine/openssl
restart: "no"
volumes:
- ./secrets/generate-secrets:/generate-secrets:ro
entrypoint: ["/generate-secrets"]
check-secrets:
image: alpine
restart: "no"
volumes:
- ./secrets/check-secrets:/check-secrets:ro
entrypoint: ["/check-secrets"]

@@ -0,0 +1,14 @@
#!/usr/bin/env sh
# this throws an error if any secrets are empty
set -o errexit
set -o nounset
set -o pipefail
for secret in /run/secrets/* ; do
if [ ! -s "$secret" ]; then
>&2 echo "ERROR: empty secret: $(basename $secret)"
exit 1
fi
done

@@ -0,0 +1,13 @@
#!/usr/bin/env sh
# this generates a random hex string (64 random bytes by default) for all empty secret files in /secrets/*/*/*
set -o errexit
set -o nounset
set -o pipefail
for secret in /secrets/*/*/* ; do
test -d "$secret" && rmdir "$secret"
test -s "$secret" && continue
openssl rand -hex "${2:-64}" > "$secret"
done

@@ -0,0 +1,3 @@
auth.{$DOMAIN} {
reverse_proxy authelia:9091
}

@@ -0,0 +1 @@
authelia is our single sign-on

@@ -0,0 +1,89 @@
version: "3.8"
services:
postgresql:
image: docker.io/library/postgres:12-alpine
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
volumes:
- database:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: ${PG_PASS:?database password required}
POSTGRES_USER: ${PG_USER:-authentik}
POSTGRES_DB: ${PG_DB:-authentik}
redis:
image: docker.io/library/redis:alpine
command: --save 60 1 --loglevel warning
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
start_period: 20s
interval: 30s
retries: 5
timeout: 3s
volumes:
- redis:/data
authentik:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.2}
restart: unless-stopped
command: server
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
volumes:
- ../data/authentik/media:/media
- ../data/authentik/custom-templates:/templates
ports:
- "${COMPOSE_PORT_HTTP:-9000}:9000"
- "${COMPOSE_PORT_HTTPS:-9443}:9443"
depends_on:
- postgresql
- redis
worker:
image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.2}
restart: unless-stopped
command: worker
environment:
AUTHENTIK_REDIS__HOST: redis
AUTHENTIK_POSTGRESQL__HOST: postgresql
AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
# `user: root` and the docker socket volume are optional.
# See more for the docker socket integration here:
# https://goauthentik.io/docs/outposts/integrations/docker
# Removing `user: root` also prevents the worker from fixing the permissions
# on the mounted folders, so when removing this make sure the folders have the correct UID/GID
# (1000:1000 by default)
user: root
volumes:
- /var/run/docker.sock:/var/run/docker.sock
- ../data/authentik/media:/media
- ../data/authentik/custom-templates:/templates
- ../secrets/authentik/certs:/certs
depends_on:
- postgresql
- redis
# setup a reverse proxy for caddy
caddy:
volumes:
- ./authentik/Proxyfile:/etc/caddy.d/authentik:ro
# backup the authentik folder
backup:
volumes:
- ../data/authentik:/mnt/backup/src/authentik:ro
volumes:
database:
driver: local
redis:
driver: local

@@ -0,0 +1,3 @@
auth.{$DOMAIN} {
reverse_proxy authentik:9000
}

@ -1,4 +1,4 @@
version: "3.7" version: "3.8"
services: services:
web: web:
@@ -6,6 +6,8 @@ services:
       context: ./web
       dockerfile: Containerfile
     restart: unless-stopped
+    depends_on:
+      - caddy
     ports:
       - "8081:80"
       - "4431:443"
@@ -14,9 +16,12 @@ services:
       - ./web/Caddyfile:/etc/caddy/Caddyfile
       - ../data/web/site:/site
       - ../data/web/data:/data
-      - caddy_config:/config
+      - ../data/web/config:/config
+    environment:
+      - DOMAIN
-volumes:
-  caddy_config:
+  caddy:
+    volumes:
+      - ./web/Proxyfile:/etc/caddy.d/web:ro
+  backup:
+    volumes:
+      - ../data/web:/mnt/backup/src/web:ro

@@ -1,8 +1,8 @@
-FROM caddy:2.7.5-builder-alpine AS builder
+FROM caddy:builder-alpine AS builder
 RUN xcaddy build \
     --with github.com/mholt/caddy-webdav
-FROM caddy:latest
+FROM caddy:alpine
 COPY --from=builder /usr/bin/caddy /usr/bin/caddy

@@ -0,0 +1,9 @@
web.{$DOMAIN} {
# forward_auth authelia:9091 {
# uri /api/verify?rd=https://auth.{$DOMAIN}/
# copy_headers Remote-User Remote-Groups Remote-Name Remote-Email
# }
reverse_proxy web:4431
}

@@ -1,36 +0,0 @@
version: '3.8'
services:
zitadel:
restart: 'always'
networks:
- 'zitadel'
image: 'ghcr.io/zitadel/zitadel:latest'
command: 'start-from-init --masterkey "6cd52ccbc4da912319f0fdc016d68575dd391bd932ebdc045c89b2dce9e90315" --tlsMode disabled'
environment:
- 'ZITADEL_DATABASE_COCKROACH_HOST=crdb'
- 'ZITADEL_EXTERNALSECURE=false'
depends_on:
crdb:
condition: 'service_healthy'
ports:
- '8123:8080'
crdb:
restart: 'always'
networks:
- 'zitadel'
image: 'cockroachdb/cockroach:v22.2.2'
command: 'start-single-node --insecure'
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
interval: '10s'
timeout: '30s'
retries: 5
start_period: '20s'
ports:
- '9090:8080'
- '26257:26257'
networks:
zitadel: