Compare commits

caddy-dock...main (37 commits)
SHA1 | Date
---|---
a32e0b201f | 1 year ago
9e0e117aa0 | 1 year ago
77298265db | 1 year ago
0cc84f1a76 | 1 year ago
1efbd62689 | 1 year ago
0b8bc032ff | 1 year ago
3775b3d5d6 | 1 year ago
3e24913eca | 1 year ago
2b9450ddd1 | 1 year ago
0a3ada3328 | 1 year ago
13bbf292a1 | 1 year ago
2faca11577 | 2 years ago
0dc484b2f4 | 2 years ago
82c41039c2 | 2 years ago
34d07b7dfd | 2 years ago
a239dd4ac5 | 2 years ago
ac50279901 | 2 years ago
aaf706bf77 | 2 years ago
e44dca5a73 | 2 years ago
55fe7de241 | 2 years ago
90d338b303 | 2 years ago
72d21c65db | 2 years ago
c54502e9b2 | 2 years ago
093c844366 | 2 years ago
660f5a39ee | 2 years ago
792e90aaa2 | 2 years ago
ea3c4d9016 | 2 years ago
964c06895b | 2 years ago
31d90f8b8a | 2 years ago
1781cb4c50 | 2 years ago
1ddc1ac083 | 2 years ago
487ed7b64d | 2 years ago
69d8a7cd51 | 2 years ago
e05a45afe7 | 2 years ago
7853e29727 | 2 years ago
33949b53a0 | 2 years ago
8a077a7b4c | 2 years ago
```diff
@ -1,4 +1,3 @@
-secrets/
-data/
-.redo
-*.tmp
+/secrets/
+/data/
+env.production
```
```diff
@ -1 +0,0 @@
-DOMAIN=localhost
```
```diff
@ -0,0 +1,15 @@
+# copy this to .env and it will be sourced by the appropriate services
+
+# domain your services will be running on
+DOMAIN=localhost
+
+# admin user for auth
+ADMIN_USER=
+ADMIN_PASS=
+
+# used for sending notifications and reset passwords
+# only supports smtp+starttls
+SMTP_ADDR=
+SMTP_PORT=587
+SMTP_USER=
+SMTP_PASS=
```
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    down --volumes
```
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    exec "$@"
```
```diff
@ -0,0 +1,4 @@
+echo generating zitadel secrets; {
+    openssl rand -hex 16 | tr -d '\n' >! secrets/auth/zitadel/MASTER_KEY
+    openssl rand -hex 32 | tr -d '\n' >! secrets/auth/zitadel/STORAGE_PASSWORD
+}
```
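A note on the snippet above: `>!` is zsh's redirection that overrides `noclobber`, and it is not valid syntax in plain `sh`. A portable sketch of the same step, assuming it runs from the repository root:

```sh
# POSIX-sh equivalent of the zsh snippet above; plain > overwrites existing files
mkdir -p secrets/auth/zitadel
openssl rand -hex 16 | tr -d '\n' > secrets/auth/zitadel/MASTER_KEY
openssl rand -hex 32 | tr -d '\n' > secrets/auth/zitadel/STORAGE_PASSWORD
```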
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    ps
```
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    pull
```
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    run "$@"
```
```diff
@ -0,0 +1,8 @@
+podman compose --env-file ${ENV_FILE:-.env} \
+    --file services/secrets.yaml \
+    --file services/backup.yaml \
+    --file services/proxy.yaml \
+    --file services/auth.yaml \
+    --file services/web.yaml \
+    --file services/git.yaml \
+    up --build
```
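All of these wrappers share one shape: the same `podman compose` invocation across the six service files, differing only in the final subcommand (`down --volumes`, `exec`, `ps`, `pull`, `run`, `up --build`), with `ENV_FILE` choosing which env file to load. The diff does not capture the scripts' filenames, so the names below are assumptions for illustration:

```sh
# bring the stack up against the production env instead of .env (script name assumed)
ENV_FILE=env.production ./up

# open a shell in the gitea container through the exec wrapper (names assumed)
./exec gitea sh
```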
```diff
@ -1 +1,3 @@
 Do not check in anything in this directory
+
+Check out ../services/secrets.yaml on how to make it easy to check that secrets are defined, or to generate secrets on start
```
```diff
@ -0,0 +1,69 @@
+secrets:
+  MASTER_KEY:
+    file: ../secrets/auth/zitadel/MASTER_KEY
+
+services:
+  backup:
+    volumes:
+      - ../data/auth:/mnt/backup/src/auth:ro
+
+  generate-secrets:
+    volumes:
+      - ../secrets/auth/zitadel/MASTER_KEY:/secrets/auth/zitadel/MASTER_KEY
+
+  zitadel:
+    restart: 'unless-stopped'
+    image: 'ghcr.io/zitadel/zitadel:v2.48.3'
+    environment:
+      ZITADEL_DATABASE_COCKROACH_HOST: crdb
+      ZITADEL_EXTERNALSECURE: true
+      ZITADEL_EXTERNALDOMAIN: auth.${DOMAIN}
+      ZITADEL_EXTERNALPORT: 443
+      ZITADEL_WEBAUTHN_NAME: ${DOMAIN}
+      ZITADEL_FIRSTINSTANCE_ORG_NAME: basement
+      ZITADEL_FIRSTINSTANCE_ORG_HUMAN_USERNAME: ${ADMIN_USER}
+      ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORD: ${ADMIN_PASS}
+
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_HOST: "${SMTP_ADDR}:${SMTP_PORT}"
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_USER: ${SMTP_USER}
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_PASSWORD: ${SMTP_PASS}
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_SSL: true
+
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM: basement@mail.${DOMAIN}
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROMNAME: basement
+      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_REPLYTOADDRESS: basement@mail.${DOMAIN}
+    secrets:
+      - MASTER_KEY
+    command: "start-from-init --masterkeyFile /run/secrets/MASTER_KEY --tlsMode external"
+    depends_on:
+      generate-secrets:
+        condition: 'service_completed_successfully'
+      caddy:
+        condition: 'service_healthy'
+      crdb:
+        condition: 'service_healthy'
+    ports:
+      - '8080:8080'
+
+  crdb:
+    restart: unless-stopped
+    image: 'cockroachdb/cockroach:latest-v23.1'
+    depends_on:
+      generate-secrets:
+        condition: 'service_completed_successfully'
+    command: "start-single-node --insecure --store=path=/cockroach/cockroach-data,size=20%"
+    healthcheck:
+      test: ["CMD", "curl", "--fail", "http://localhost:8080/health?ready=1"]
+      interval: '10s'
+      timeout: '30s'
+      retries: 5
+      start_period: '20s'
+    ports:
+      - '9090:8080'
+      - '26257:26257'
+    volumes:
+      - ../data/auth/crdb/data:/cockroach/cockroach-data:rw
+
+  caddy:
+    volumes:
+      - ./auth/Proxyfile:/etc/caddy.d/zitadel:ro
```
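One Compose subtlety in the file above: `ZITADEL_EXTERNALSECURE: true` and `..._SMTP_SSL: true` are YAML booleans, and the Compose spec defines `environment` map values as strings, so some implementations warn on or reject the unquoted form. A quoted sketch of the two lines:

```yaml
# quoting keeps the values as strings rather than YAML booleans
ZITADEL_EXTERNALSECURE: "true"
ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_SSL: "true"
```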
```diff
@ -0,0 +1,4 @@
+auth.{$DOMAIN}:443 {
+    reverse_proxy zitadel:8080
+    tls internal
+}
```
```diff
@ -0,0 +1,42 @@
+secrets:
+  B2_APPLICATION_KEY:
+    file: ../secrets/backup/duplicity/B2_APPLICATION_KEY
+  B2_APPLICATION_KEY_ID:
+    file: ../secrets/backup/duplicity/B2_APPLICATION_KEY_ID
+  BUCKET_NAME:
+    file: ../secrets/backup/duplicity/BUCKET_NAME
+  PASSPHRASE:
+    file: ../secrets/backup/duplicity/PASSPHRASE
+
+services:
+  backup:
+    image: ghcr.io/tecnativa/docker-duplicity:3.3.1
+    restart: unless-stopped
+    depends_on:
+      generate-secrets:
+        condition: 'service_completed_successfully'
+    secrets: [B2_APPLICATION_KEY, B2_APPLICATION_KEY_ID, BUCKET_NAME, PASSPHRASE]
+    environment:
+      HOSTNAME: ${DOMAIN}
+      TZ: America/New_York
+    volumes:
+      - ./backup/backup-files:/backup-files:ro
+    entrypoint: ["/bin/sh", "/backup-files"]
+
+  generate-secrets:
+    volumes:
+      - ../secrets/backup/duplicity/BUCKET_NAME:/secrets/backup/duplicity/BUCKET_NAME
+      - ../secrets/backup/duplicity/PASSPHRASE:/secrets/backup/duplicity/PASSPHRASE
+
+
+# duplicity-postgres:
+#   image: tecnativa/docker-duplicity-postgres:latest
+#   restart: unless-stopped
+#   depends_on: [secrets]
+#   secrets: [B2_APPLICATION_KEY, B2_APPLICATION_KEY_ID, BUCKET_NAME, PASSPHRASE]
+#   environment:
+#     HOSTNAME: ${DOMAIN}
+#     TZ: America/New_York
+#   volumes:
+#     - ./backup/backup-databases:/backup-databases:ro
+#   entrypoint: ["/bin/sh", "/backup-databases"]
```
```diff
@ -0,0 +1,14 @@
+read B2_APPLICATION_KEY_ID < /run/secrets/B2_APPLICATION_KEY_ID
+read B2_APPLICATION_KEY < /run/secrets/B2_APPLICATION_KEY
+read BUCKET_NAME < /run/secrets/BUCKET_NAME
+export DST=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
+
+read PASSPHRASE < /run/secrets/PASSPHRASE
+export PASSPHRASE
+
+for environment in /backup/*; do
+    . $environment
+    export PGHOST PGPASSWORD PGUSER DBS_TO_INCLUDE DBS_TO_EXCLUDE
+    /usr/local/bin/entrypoint
+    unset PGHOST PGPASSWORD PGUSER DBS_TO_INCLUDE DBS_TO_EXCLUDE
+done
```
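The loop sources one environment file per database from `/backup/*` and hands the connection variables to the duplicity image's entrypoint, unsetting them between iterations. The diff does not include any of those files, so this is a purely hypothetical example of what one might contain:

```sh
# /backup/gitea -- hypothetical per-database environment file
PGHOST=db
PGUSER=gitea
PGPASSWORD=change-me
DBS_TO_INCLUDE=gitea
DBS_TO_EXCLUDE=
```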
```diff
@ -0,0 +1,9 @@
+read B2_APPLICATION_KEY_ID < /run/secrets/B2_APPLICATION_KEY_ID
+read B2_APPLICATION_KEY < /run/secrets/B2_APPLICATION_KEY
+read BUCKET_NAME < /run/secrets/BUCKET_NAME
+export DST=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
+
+read PASSPHRASE < /run/secrets/PASSPHRASE
+export PASSPHRASE
+
+/usr/local/bin/entrypoint
```
```diff
@ -1,14 +0,0 @@
-. ../../env.production
-
-service=$(basename $PWD)
-secrets="../../secrets/$service"
-
-read B2_APPLICATION_KEY_ID < $secrets/application-key-id
-read B2_APPLICATION_KEY < $secrets/application-key
-export BUCKET_NAME=${DOMAIN}-backup
-
-export DESTINATION=b2://${B2_APPLICATION_KEY_ID}:${B2_APPLICATION_KEY}@${BUCKET_NAME}
-
-read PASSPHRASE < $secrets/passphrase
-env PASSPHRASE=$PASSPHRASE duplicity backup ../../data $DESTINATION >&2
-env PASSPHRASE=$PASSPHRASE duplicity remove-older-than 28D $DESTINATION >&2
```
```diff
@ -1,20 +0,0 @@
-version: "3.7"
-
-services:
-  caddy:
-    image: caddy
-    restart: unless-stopped
-    ports:
-      - "80:80"
-      - "443:443"
-      - "443:443/udp"
-    volumes:
-      - ./caddy/Caddyfile:/etc/caddy/Caddyfile
-      - ../data/caddy/site:/site
-      - ../data/caddy/data:/data
-      - caddy_config:/config
-    environment:
-      - DOMAIN
-
-volumes:
-  caddy_config:
```
```diff
@ -1,9 +0,0 @@
-{$DOMAIN} {
-    file_server {
-        root /site
-    }
-}
-
-web.{$DOMAIN} {
-    reverse_proxy services-web-1:4431
-}
```
```diff
@ -0,0 +1,48 @@
+secrets:
+  DB_PASSWD:
+    file: ../secrets/git/gitea/DB_PASSWD
+
+services:
+  caddy:
+    volumes:
+      - ./git/Proxyfile:/etc/caddy.d/git
+  backup:
+    volumes:
+      - ../data/git:/mnt/backup/src/git
+
+  gitea:
+    image: gitea/gitea:1.21.3-rootless
+    secrets: [ DB_PASSWD ]
+    environment:
+      GITEA__database__DB_TYPE: postgres
+      GITEA__database__HOST: "db:5432"
+      GITEA__database__NAME: gitea
+      GITEA__database__USER: gitea
+      GITEA__database__PASSWD__FILE: /run/secrets/DB_PASSWD
+      GITEA__mailer__ENABLED: true
+      GITEA__mailer__FROM: gitea@mail.${DOMAIN}
+      GITEA__mailer__PROTOCOL: smtp+starttls
+      GITEA__mailer__SMTP_ADDR: ${SMTP_ADDR}
+      GITEA__mailer__SMTP_PORT: ${SMTP_PORT}
+      GITEA__mailer__USER: ${SMTP_USER}
+      GITEA__mailer__PASSWD: ${SMTP_PASS}
+    restart: unless-stopped
+    volumes:
+      - ../data/git/gitea/data:/data
+    ports:
+      - 3000:3000
+  db:
+    image: postgres:16.1-alpine
+    secrets: [ DB_PASSWD ]
+    environment:
+      POSTGRES_USER: gitea
+      POSTGRES_PASSWORD_FILE: /run/secrets/DB_PASSWD
+      POSTGRES_DB: gitea
+    restart: unless-stopped
+    volumes:
+      - db_data:/var/lib/postgresql/data
+    expose:
+      - 5432
+
+volumes:
+  db_data:
```
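Gitea reads `GITEA__<SECTION>__<KEY>` environment variables as overrides for its `app.ini`, and the `__FILE` suffix loads the value from a file (used here for the DB password). The `GITEA__database__*` block above is therefore roughly equivalent to this ini fragment:

```ini
; app.ini equivalent of the GITEA__database__* variables above
[database]
DB_TYPE = postgres
HOST    = db:5432
NAME    = gitea
USER    = gitea
```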
```diff
@ -0,0 +1,3 @@
+git.{$DOMAIN} {
+    reverse_proxy gitea:3000
+}
```
```diff
@ -0,0 +1,54 @@
+secrets:
+  SMTP_PASSWORD:
+    file: ../secrets/mail/SMTP_PASSWORD
+
+services:
+  generate-secrets:
+    volumes:
+      - ../secrets/mail/maddy/SMTP_PASSWORD:/secrets/mail/maddy/SMTP_PASSWORD
+
+  backup:
+    volumes:
+      - ../data/mail:/mnt/backup/src/mail:ro
+
+  caddy:
+    volumes:
+      - ./mail/Proxyfile:/etc/caddy.d/mail:ro
+
+  maddy:
+    image: foxcpp/maddy:0.7
+    secrets: [SMTP_PASSWORD]
+    restart: unless-stopped
+    depends_on:
+      generate-secrets:
+        condition: 'service_completed_successfully'
+    environment:
+      - MADDY_HOSTNAME=mx.mail.${DOMAIN}
+      - MADDY_DOMAIN=mail.${DOMAIN}
+    volumes:
+      - ../data/mail/maddy:/data
+      # TODO: get from caddy?
+      #- ../secrets/tls/fullchain.pem:/data/tls/fullchain.pem:ro
+      #- ../secrets/tls/privkey.pem:/data/tls/privkey.pem:ro
+    ports:
+      - 25:25
+      - 143:143
+      - 587:587
+      - 993:993
+
+  roundcube:
+    image: roundcube/roundcubemail:1.6.5-fpm-alpine
+    environment:
+      ROUNDCUBEMAIL_DEFAULT_HOST: ssl://mx.mail.${DOMAIN}
+      ROUNDCUBEMAIL_DEFAULT_PORT: 993
+      ROUNDCUBEMAIL_SMTP_SERVER: tls://mx.mail.${DOMAIN}
+      ROUNDCUBEMAIL_SMTP_PORT: 587
+      ROUNDCUBEMAIL_DB_TYPE: sqlite
+    volumes:
+      - ../data/mail/roundcube/db:/var/roundcube/db
+    ports:
+      - 9002:80
+
+  check-secrets:
+    secrets:
+      - SMTP_PASSWORD
```
```diff
@ -0,0 +1,4 @@
+mail.{$DOMAIN} {
+    reverse_proxy roundcube:9002
+}
+
```
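This Proxyfile looks inconsistent with the compose file above: `9002` is the host side of the `9002:80` mapping, so it does not exist inside the compose network, and the `fpm-alpine` Roundcube image speaks FastCGI (PHP-FPM, conventionally on port 9000) rather than HTTP. A speculative sketch of a working fragment; it additionally assumes Caddy shares the Roundcube webroot (`/var/www/html` in the official images) via a volume so it can serve the static assets:

```
mail.{$DOMAIN} {
    root * /var/www/html
    php_fastcgi roundcube:9000
    file_server
}
```

Switching to the non-fpm (Apache) Roundcube image and keeping a plain `reverse_proxy roundcube:80` would be the simpler fix.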
```diff
@ -0,0 +1,25 @@
+services:
+  caddy:
+    image: caddy
+    restart: unless-stopped
+    ports:
+      - "80:80"
+      - "443:443"
+      - "443:443/udp"
+    volumes:
+      - ./proxy/Caddyfile:/etc/caddy/Caddyfile
+      - ../data/proxy/caddy/site:/site
+      - ../data/proxy/caddy/data:/data
+      - ../data/proxy/caddy/config:/config
+    environment:
+      - DOMAIN
+    healthcheck:
+      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost"]
+      interval: '10s'
+      timeout: '30s'
+      retries: 5
+      start_period: '20s'
+
+  backup:
+    volumes:
+      - ../data/proxy:/mnt/backup/src/proxy:ro
```
```diff
@ -0,0 +1,7 @@
+{$DOMAIN} {
+    file_server {
+        root /site
+    }
+}
+
+import /etc/caddy.d/*
```
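The `import /etc/caddy.d/*` line is what the per-service `Proxyfile` mounts hook into: each compose file drops a fragment into `/etc/caddy.d/`, and Caddy merges them all when it loads the config. Adding a service is just another mount plus a fragment like this (hypothetical `wiki` service); Caddy reads the directory at config load, so a reload or container restart is needed afterwards:

```
wiki.{$DOMAIN} {
    reverse_proxy wiki:8080
}
```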
```diff
@ -0,0 +1,14 @@
+services:
+  generate-secrets:
+    image: alpine/openssl
+    restart: no
+    volumes:
+      - ./secrets/generate-secrets:/generate-secrets:ro
+    entrypoint: ["/generate-secrets"]
+
+  check-secrets:
+    image: alpine
+    restart: no
+    volumes:
+      - ./secrets/check-secrets:/check-secrets:ro
+    entrypoint: ["/check-secrets"]
```
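A YAML gotcha in this file: unquoted `no` is a YAML boolean, so `restart: no` can reach the compose implementation as `false` rather than the string `no`; the Compose spec writes it quoted. A sketch of the safer spelling:

```yaml
services:
  generate-secrets:
    restart: "no"   # quoted so YAML keeps the string, not boolean false
```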
```diff
@ -0,0 +1,14 @@
+#!/usr/bin/env sh
+
+# this throws an error if any secrets are empty
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+for secret in /run/secrets/* ; do
+    if [ -s "$secret" ]; then
+        >&2 echo "ERROR: empty secret: $(basename $secret)"
+        exit 1
+    fi
+done
```
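The test above looks inverted relative to its comment: `[ -s file ]` is true when the file exists and is non-empty, so as written the script fails on every populated secret and passes empty ones. A corrected sketch (note also that `set -o pipefail` is not POSIX and is rejected by some `sh` implementations):

```sh
for secret in /run/secrets/* ; do
    if [ ! -s "$secret" ]; then    # -s is true for non-empty files; negate to catch empty ones
        >&2 echo "ERROR: empty secret: $(basename "$secret")"
        exit 1
    fi
done
```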
```diff
@ -0,0 +1,13 @@
+#!/usr/bin/env sh
+
+# this generates a random 64 char hex string for all empty secret files in /secrets/*/*/*
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+for secret in /secrets/*/*/* ; do
+    test -d "$secret" && rmdir "$secret"
+    test -s "$secret" && continue
+    openssl rand -hex ${2:-64} > $secret
+done
```
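Two details worth flagging here: `${2:-64}` reads the script's second positional argument, which the `entrypoint: ["/generate-secrets"]` above never passes, so the default always wins; and `openssl rand -hex 64` emits 64 random bytes, i.e. 128 hex characters, not the 64 the comment promises. If the length should be tunable, an environment variable is more direct; `SECRET_BYTES` is a hypothetical name:

```sh
# length in bytes; openssl rand -hex N prints 2*N hex characters (32 bytes = 64 chars)
SECRET_BYTES=${SECRET_BYTES:-32}
openssl rand -hex "$SECRET_BYTES" > "$secret"
```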
```diff
@ -0,0 +1,3 @@
+auth.{$DOMAIN} {
+    reverse_proxy authelia:9091
+}
```
```diff
@ -0,0 +1,2 @@
+notifier:
+  smtp:
```
```diff
@ -0,0 +1 @@
+authelia is our single sign-on
```
```diff
@ -0,0 +1,89 @@
+version: "3.8"
+
+services:
+  postgresql:
+    image: docker.io/library/postgres:12-alpine
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "pg_isready -d $${POSTGRES_DB} -U $${POSTGRES_USER}"]
+      start_period: 20s
+      interval: 30s
+      retries: 5
+      timeout: 5s
+    volumes:
+      - database:/var/lib/postgresql/data
+    environment:
+      POSTGRES_PASSWORD: ${PG_PASS:?database password required}
+      POSTGRES_USER: ${PG_USER:-authentik}
+      POSTGRES_DB: ${PG_DB:-authentik}
+  redis:
+    image: docker.io/library/redis:alpine
+    command: --save 60 1 --loglevel warning
+    restart: unless-stopped
+    healthcheck:
+      test: ["CMD-SHELL", "redis-cli ping | grep PONG"]
+      start_period: 20s
+      interval: 30s
+      retries: 5
+      timeout: 3s
+    volumes:
+      - redis:/data
+  authentik:
+    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.2}
+    restart: unless-stopped
+    command: server
+    environment:
+      AUTHENTIK_REDIS__HOST: redis
+      AUTHENTIK_POSTGRESQL__HOST: postgresql
+      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
+      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
+      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
+    volumes:
+      - ../data/authentik/media:/media
+      - ../data/authentik/custom-templates:/templates
+    ports:
+      - "${COMPOSE_PORT_HTTP:-9000}:9000"
+      - "${COMPOSE_PORT_HTTPS:-9443}:9443"
+    depends_on:
+      - postgresql
+      - redis
+  worker:
+    image: ${AUTHENTIK_IMAGE:-ghcr.io/goauthentik/server}:${AUTHENTIK_TAG:-2023.10.2}
+    restart: unless-stopped
+    command: worker
+    environment:
+      AUTHENTIK_REDIS__HOST: redis
+      AUTHENTIK_POSTGRESQL__HOST: postgresql
+      AUTHENTIK_POSTGRESQL__USER: ${PG_USER:-authentik}
+      AUTHENTIK_POSTGRESQL__NAME: ${PG_DB:-authentik}
+      AUTHENTIK_POSTGRESQL__PASSWORD: ${PG_PASS}
+    # `user: root` and the docker socket volume are optional.
+    # See more for the docker socket integration here:
+    # https://goauthentik.io/docs/outposts/integrations/docker
+    # Removing `user: root` also prevents the worker from fixing the permissions
+    # on the mounted folders, so when removing this make sure the folders have the correct UID/GID
+    # (1000:1000 by default)
+    user: root
+    volumes:
+      - /var/run/docker.sock:/var/run/docker.sock
+      - ../data/authentik/media:/media
+      - ../data/authentik/custom-templates:/templates
+      - ../secrets/authentik/certs:/certs
+    depends_on:
+      - postgresql
+      - redis
+  # setup a reverse proxy for caddy
+  caddy:
+    volumes:
+      - ./authentik/Proxyfile:/etc/caddy.d/authentik:ro
+
+  # backup the zitadel folder
+  backup:
+    volumes:
+      - ../data/authentik:/mnt/backup/src/authentik:ro
+
+volumes:
+  database:
+    driver: local
+  redis:
+    driver: local
```
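The `${PG_PASS:?database password required}` form is standard POSIX parameter expansion, which Compose also supports in its interpolation: `:?msg` aborts with the message when the variable is unset or empty, while `:-default` substitutes a fallback. A quick shell illustration:

```sh
unset PG_PASS
echo "${PG_USER:-authentik}"                    # prints: authentik
echo "${PG_PASS:?database password required}"   # fails: PG_PASS: database password required
```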
```diff
@ -0,0 +1,3 @@
+auth.{$DOMAIN} {
+    reverse_proxy authentik:9000
+}
```
```diff
@ -1,8 +1,8 @@
-FROM caddy:2.7.5-builder-alpine AS builder
+FROM caddy:builder-alpine AS builder
 
 RUN xcaddy build \
     --with github.com/mholt/caddy-webdav
 
-FROM caddy:latest
+FROM caddy:alpine
 
 COPY --from=builder /usr/bin/caddy /usr/bin/caddy
```
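This Dockerfile exists only to compile the `github.com/mholt/caddy-webdav` module into Caddy; the diff also trades the pinned tags (`2.7.5-builder-alpine`, `latest`) for floating ones (`builder-alpine`, `alpine`). Since `webdav` is a third-party directive, Caddy has to be told where it sorts in the directive order; a minimal usage sketch with a hypothetical host and path:

```
{
    order webdav last
}

dav.{$DOMAIN} {
    root * /data/webdav
    webdav
}
```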
```diff
@ -0,0 +1,9 @@
+web.{$DOMAIN} {
+    # forward_auth authelia:9091 {
+    #     uri /api/verify?rd=https://auth.{$DOMAIN}/
+    #     copy_headers Remote-User Remote-Groups Remote-Name Remote-Email
+    # }
+
+    reverse_proxy web:4431
+}
+
```
```diff
@ -1,36 +0,0 @@
-version: '3.8'
-
-services:
-  zitadel:
-    restart: 'always'
-    networks:
-      - 'zitadel'
-    image: 'ghcr.io/zitadel/zitadel:latest'
-    command: 'start-from-init --masterkey "6cd52ccbc4da912319f0fdc016d68575dd391bd932ebdc045c89b2dce9e90315" --tlsMode disabled'
-    environment:
-      - 'ZITADEL_DATABASE_COCKROACH_HOST=crdb'
-      - 'ZITADEL_EXTERNALSECURE=false'
-    depends_on:
-      crdb:
-        condition: 'service_healthy'
-    ports:
-      - '8123:8080'
-
-  crdb:
-    restart: 'always'
-    networks:
-      - 'zitadel'
-    image: 'cockroachdb/cockroach:v22.2.2'
-    command: 'start-single-node --insecure'
-    healthcheck:
-      test: ["CMD", "curl", "-f", "http://localhost:8080/health?ready=1"]
-      interval: '10s'
-      timeout: '30s'
-      retries: 5
-      start_period: '20s'
-    ports:
-      - '9090:8080'
-      - '26257:26257'
-
-networks:
-  zitadel:
```