Merge 083a1f3762 into 250030fe7a
commit 523d80a93c
@@ -0,0 +1,103 @@
MODULES += nginx
MODULES += keycloak
MODULES += hedgedoc
MODULES += grafana
MODULES += prometheus
MODULES += mastodon
MODULES += matrix
MODULES += nextcloud
MODULES += mobilizon
MODULES += gitea
#MODULES += pixelfed

include env.production
domain_name := $(DOMAIN_NAME)

help:
	@echo "usage: make run"

UC = $(shell echo '$1' | tr '[:lower:]' '[:upper:]')

DOCKER = \
	$(foreach m,$(MODULES),. data/$m/secrets && ) \
	docker-compose \
		--env-file env.production \
		$(foreach m,$(MODULES),--file ./$m.yaml) \

run:
	$(DOCKER) up
down:
	$(DOCKER) down
nginx-shell:
	$(DOCKER) exec nginx sh
grafana-shell:
	$(DOCKER) exec grafana bash
hedgedoc-shell:
	$(DOCKER) exec hedgedoc sh
keycloak-shell:
	$(DOCKER) exec keycloak sh
mastodon-shell:
	$(DOCKER) exec mastodon bash
mastodon-streaming-shell:
	$(DOCKER) exec mastodon-streaming bash
matrix-shell:
	$(DOCKER) exec matrix-synapse bash
matrix-logs:
	$(DOCKER) logs -f matrix-synapse
nextcloud-logs:
	$(DOCKER) logs -f nextcloud
nginx-build: data/nginx/secrets
	$(DOCKER) build nginx

certdir = ./data/certbot/conf/live/${DOMAIN_NAME}

run: secrets-setup

secrets-setup: $(foreach m,$(MODULES),data/$m/secrets)

# Create the per-subdomain secrets if they don't exist
# not every service requires all of these features, but create them anyway
GET_MODULE = $(call UC,$(word 2,$(subst /, ,$@)))
RAND = $$(openssl rand -hex $1)

data/%/secrets:
	mkdir -p $(dir $@)
	echo >$@ "# DO NOT CHECK IN"
	echo >>$@ "export $(GET_MODULE)_ADMIN_PASSWORD=$(call RAND,8)"
	echo >>$@ "export $(GET_MODULE)_CLIENT_SECRET=$(call RAND,20)"
	echo >>$@ "export $(GET_MODULE)_SESSION_SECRET=$(call RAND,20)"

data/gitea/secrets: data/gitea/host-setup.done
data/gitea/host-setup.done:
	sudo ./gitea/host-setup.sh
	mkdir -p $(dir $@)
	touch $@

keycloak-setup: secrets-setup
	$(DOCKER) run keycloak-setup

certbot:
	$(DOCKER) \
	run --entrypoint '/bin/sh -c "\
		rm -rf /etc/letsencrypt ; \
		certbot certonly \
			--webroot \
			--webroot-path /var/www/certbot \
			--email "admin@$(DOMAIN_NAME)" \
			--rsa-key-size "2048" \
			--agree-tos \
			--no-eff-email \
			--force-renewal \
			-d $(DOMAIN_NAME) \
			$(foreach m,$(MODULES),\
				-d $($(call UC,$m)_HOSTNAME).$(DOMAIN_NAME)) \
	"' certbot

nginx-reload:
	$(DOCKER) restart nginx

config:
	$(DOCKER) config

FORCE:
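For reference, the data/%/secrets rule above writes one sourceable env file per module, and the DOCKER macro sources every one of them before invoking docker-compose with all of the module overlay files. With illustrative (not real) values, a generated data/grafana/secrets looks roughly like the first block below, and "make run" expands to roughly the command in the second block:

# data/grafana/secrets (illustrative values only)
# DO NOT CHECK IN
export GRAFANA_ADMIN_PASSWORD=4fdba39380a1b2c3
export GRAFANA_CLIENT_SECRET=<output of openssl rand -hex 20>
export GRAFANA_SESSION_SECRET=<output of openssl rand -hex 20>

# approximate expansion of "make run"
. data/nginx/secrets && . data/keycloak/secrets && ... \
docker-compose --env-file env.production --file ./nginx.yaml --file ./keycloak.yaml ... up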
@@ -1,12 +1,27 @@
-DOMAIN_NAME=hackerspace.zone
-REALM=hackerspace
-
-KEYCLOAK_HOSTNAME=login.hackerspace.zone
-HEDGEDOC_HOSTNAME=docs.hackerspace.zone
-MASTODON_HOSTNAME=social.hackerspace.zone
-NEXTCLOUD_HOSTNAME=cloud.hackerspace.zone
-GRAFANA_HOSTNAME=dashboard.hackerspace.zone
-GITEA_HOSTNAME=git.hackerspace.zone
-MATRIX_HOSTNAME=matrix.hackerspace.zone
-MOBILIZON_HOSTNAME=events.hackerspace.zone
-PIXELFED_HOSTNAME=pixelfed.hackerspace.zone
+# Fill in with your top-level domain name and desired OAUTH realm name
+DOMAIN_NAME=dev.v.st
+REALM=hackerspace
+
+# Fill in with your SMTP server, if you have one
+SMTP_SERVER=
+SMTP_USER=
+SMTP_PASSWORD=
+SMTP_PORT=
+
+# You can leave these as is or change them if you like
+NGINX_HOSTNAME=www
+KEYCLOAK_HOSTNAME=login
+HEDGEDOC_HOSTNAME=docs
+MASTODON_HOSTNAME=social
+NEXTCLOUD_HOSTNAME=cloud
+GRAFANA_HOSTNAME=dashboard
+GITEA_HOSTNAME=git
+MATRIX_HOSTNAME=matrix
+MOBILIZON_HOSTNAME=events
+PIXELFED_HOSTNAME=pixelfed
+PROMETHEUS_HOSTNAME=metrics
+
+AUTH_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/auth
+TOKEN_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/token
+USERINFO_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/userinfo
+LOGOUT_URL=https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/protocol/openid-connect/logout
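With the sample values above (DOMAIN_NAME=dev.v.st, REALM=hackerspace, KEYCLOAK_HOSTNAME=login), the derived endpoints expand to, for example:

AUTH_URL=https://login.dev.v.st/realms/hackerspace/protocol/openid-connect/auth
TOKEN_URL=https://login.dev.v.st/realms/hackerspace/protocol/openid-connect/token

so each module's compose file only needs to reference ${AUTH_URL}, ${TOKEN_URL}, ${USERINFO_URL} and its own client secret.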
@@ -0,0 +1,69 @@
# gitea requires ssh access from the host machine, which needs special setup
# In order to create the git user and auth keys, you need to run:
#
#	sudo gitea/setup.sh
#
version: "3"

services:
  gitea:
    image: gitea/gitea:1.17.3
    container_name: gitea
    env_file:
      - ./env.production
    environment:
      - USER_UID=2222 # must match git user on host system
      - USER_GID=2222
      - GITEA_CLIENT_SECRET=${GITEA_CLIENT_SECRET}
      - GITEA_ADMIN_PASSWORD=${GITEA_ADMIN_PASSWORD}
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=gitea-db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
      - GITEA__oauth2_client__ENABLE_AUTO_REGISTRATION=true
      - GITEA__openid__ENABLE_OPENID_SIGNIN=true
      - GITEA__openid__ENABLE_OPENID_SIGNUP=false
      - GITEA__service__DISABLE_REGISTRATION=true
      - GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
      - GITEA__repository__DEFAULT_BRANCH=main
      - GITEA__server__ROOT_URL=https://${GITEA_HOSTNAME}.${DOMAIN_NAME}/
      - GITEA__server__SSH_DOMAIN=${GITEA_HOSTNAME}.${DOMAIN_NAME}
      - GITEA__security__SECRET_KEY=${GITEA_SESSION_SECRET}
      - GITEA__security__INSTALL_LOCK=true
    entrypoint: ["/setup.sh"]
    volumes:
      - ./gitea/setup.sh:/setup.sh:ro
      - ./data/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      - /home/git/.ssh/:/data/git/.ssh
    ports:
      # - "3030:3000"
      - "2222:22" # route host port 2222 to container port 22 for inbound ssh
    restart: always
    depends_on:
      - gitea-db

  gitea-db:
    image: postgres:13.4-alpine
    container_name: gitea-db
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - ./data/gitea/postgres:/var/lib/postgresql/data

  # add the gitea nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./gitea/nginx.conf:/etc/nginx/templates/gitea.conf.template:ro

  # add the gitea client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/gitea/secrets
    volumes:
      - ./gitea/keycloak.sh:/keycloak-setup/gitea.sh:ro
@@ -1,3 +1,6 @@
 # gitea
 
-OIDC setup is now automated
+There is still a sudo step that has to happen on the host.
+
+OIDC setup is now automated in the container.
+
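A plausible first-run sequence, inferred from the Makefile targets in this merge (the host-setup script name comes from the data/gitea/host-setup.done rule; it is not spelled out in this README):

make run              # triggers the host-side step: sudo ./gitea/host-setup.sh
make keycloak-setup   # runs the keycloak-setup container, which registers the gitea OIDC client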
@@ -1,46 +0,0 @@
version: "3"

networks:
  gitea:
    external: false

services:
  gitea:
    image: gitea/gitea:1.16.6
    env_file:
      - ../env.production
      - env.production
      - ../data/gitea/env.secrets
    environment:
      - USER_UID=2222 # must match git user on host system
      - USER_GID=2222
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
    networks:
      - gitea
    volumes:
      - ../data/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      - /home/git/.ssh/:/data/git/.ssh
    ports:
      - "3030:3000"
      - "2222:22"
    restart: always
    depends_on:
      - db

  db:
    image: postgres:13.4-alpine
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - ../data/gitea/postgres:/var/lib/postgresql/data
    networks:
      - gitea
@@ -1,7 +0,0 @@
# gitea config for keycloak integration
# only allow open id sign-in, turn off all other registrations
GITEA__openid__ENABLE_OPENID_SIGNIN=true
GITEA__openid__ENABLE_OPENID_SIGNUP=false
#GITEA__service__DISABLE_REGISTRATION=true
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
GITEA__repository__DEFAULT_BRANCH=main
@@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the gitea client connection

client-create gitea "$GITEA_HOSTNAME.$DOMAIN_NAME" "$GITEA_CLIENT_SECRET" </dev/null
@@ -1,69 +0,0 @@
#!/bin/bash
die() { echo >&2 "gitea: ERROR $*" ; exit 1 ; }
info() { echo >&2 "gitea: $*" ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"

source ../env.production || die "no top level environment"
source ./env.production || die "no local environment"

DATA="../data/gitea"
SECRETS="$DATA/env.secrets"
INI="$DATA/gitea/conf/app.ini"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "unable to start"
	exit 0
fi

./add-ssh-user || die "unable to add ssh user"

GITEA_CLIENT_SECRET="$(openssl rand -hex 32)"
GITEA_ADMIN_PASSWORD="$(openssl rand -hex 8)"

info "creating new secrets $SECRETS"

mkdir -p "$DATA"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
GITEA_CLIENT_SECRET=$GITEA_CLIENT_SECRET
GITEA_ADMIN_PASSWORD=$GITEA_ADMIN_PASSWORD
GITEA__server__ROOT_URL=https://$GITEA_HOSTNAME/
GITEA__server__SSH_DOMAIN=$GITEA_HOSTNAME
GITEA__security__INSTALL_LOCK=true
GITEA__security__SECRET_KEY=$(openssl rand -hex 32)
EOF

docker-compose down 2>/dev/null

../keycloak/client-delete gitea 2>/dev/null
../keycloak/client-create <<EOF || die "unable to create gitea client"
{
	"clientId": "gitea",
	"rootUrl": "https://$GITEA_HOSTNAME",
	"adminUrl": "https://$GITEA_HOSTNAME",
	"redirectUris": [ "https://$GITEA_HOSTNAME/*" ],
	"webOrigins": [ "https://$GITEA_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$GITEA_CLIENT_SECRET"
}
EOF

docker-compose up -d || die "unable to start container"

info "waiting for startup..."
sleep 5

info "adding oauth login"
docker-compose exec -u git gitea \
	gitea admin auth add-oauth \
	--name "keycloak" \
	--provider "openidConnect" \
	--key "gitea" \
	--secret "$GITEA_CLIENT_SECRET" \
	--auto-discover-url "https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/.well-known/openid-configuration" \
	--group-claim-name "groups" \
	--admin-group "admin" \
	|| die "unable to add oauth interface"
@@ -0,0 +1,36 @@
#!/bin/bash -x
# This is *container* setup for the OIDC stuff

export APP_NAME="${DOMAIN_NAME} Gitea"
OIDC_CANARY="/data/oidc.done"

if [ -r "$OIDC_CANARY" ]; then
	# based on https://github.com/go-gitea/gitea/blob/main/Dockerfile
	exec "/usr/bin/entrypoint" "/bin/s6-svscan" "/etc/s6";
fi

# We have to do some setup, so start things and wait for the config
# file to appear so that we can edit it.
"/usr/bin/entrypoint" "/bin/s6-svscan" "/etc/s6" &

echo >&2 "*** Sleeping for setup"
sleep 30

echo >&2 "*** Adding OIDC login for $DOMAIN_NAME"
su -s /bin/sh git <<EOF || exit 1
gitea admin auth add-oauth \
	--name "keycloak" \
	--provider "openidConnect" \
	--key "gitea" \
	--secret "${GITEA_CLIENT_SECRET}" \
	--auto-discover-url "https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}/.well-known/openid-configuration" \
	--group-claim-name "groups" \
	--admin-group "admin" \

EOF

touch "${OIDC_CANARY}"

echo >&2 "*** Done, maybe it works?"
exit 0
@@ -0,0 +1,43 @@
version: "3"
services:
  grafana:
    image: grafana/grafana-oss:8.5.1
    container_name: grafana
    user: "0:0"
    environment:
      GF_AUTH_GENERIC_OAUTH_ENABLED: 'True'
      GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: 'True' # otherwise no login is possible
      #GF_AUTH_GENERIC_OAUTH_TEAM_IDS: ''
      #GF_AUTH_GENERIC_OAUTH_ALLOWED_ORGANIZATIONS: ''
      #GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: '<domains>'
      #GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_ADMIN_PASSWORD} # ignored?
      GF_AUTH_GENERIC_OAUTH_NAME: Keycloak
      GF_AUTH_GENERIC_OAUTH_CLIENT_ID: grafana
      GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
      GF_SERVER_ROOT_URL: https://${GRAFANA_HOSTNAME}.${DOMAIN_NAME}/
      GF_SERVER_DOMAIN: ${GRAFANA_HOSTNAME}.${DOMAIN_NAME}
      GF_AUTH_GENERIC_OAUTH_AUTH_URL: ${AUTH_URL}
      GF_AUTH_GENERIC_OAUTH_TOKEN_URL: ${TOKEN_URL}
      GF_AUTH_GENERIC_OAUTH_API_URL: ${USERINFO_URL}
      GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET: ${GRAFANA_CLIENT_SECRET}
    # reset the admin password on every run, since otherwise it defaults to admin/admin
    entrypoint: ["sh", "-c", "grafana-cli admin reset-admin-password ${GRAFANA_ADMIN_PASSWORD} && /run.sh"]
    volumes:
      - ./data/grafana/data:/var/lib/grafana
      - ./grafana/provisioning:/etc/grafana/provisioning:ro
      - ./grafana/dashboards:/etc/grafana/dashboards:ro
    restart: always
    # ports:
    #   - 3000:3000

  # add the grafana nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./grafana/nginx.conf:/etc/nginx/templates/grafana.conf.template:ro

  # add the grafana client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/grafana/secrets
    volumes:
      - ./grafana/keycloak.sh:/keycloak-setup/grafana.sh:ro
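Since the entrypoint above resets the Grafana admin password with grafana-cli on every start, the same reset can be repeated by hand against the running container; a minimal sketch, assuming the `grafana` container name from this file:

. data/grafana/secrets
docker exec grafana grafana-cli admin reset-admin-password "$GRAFANA_ADMIN_PASSWORD"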
[3 file diffs suppressed because they are too large]
@@ -1,26 +0,0 @@
version: "3"

services:
  grafana:
    image: grafana/grafana-oss:8.5.1
    user: "0:0"
    environment:
      GF_AUTH_GENERIC_OAUTH_ENABLED: 'True'
      GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: 'True' # otherwise no login is possible
      #GF_AUTH_GENERIC_OAUTH_TEAM_IDS: ''
      #GF_AUTH_GENERIC_OAUTH_ALLOWED_ORGANIZATIONS: ''
      #GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: '<domains>'
      GF_AUTH_GENERIC_OAUTH_NAME: Keycloak
      GF_AUTH_GENERIC_OAUTH_CLIENT_ID: grafana
      GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
      # GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET is in env.secrets
      # auth URLs are in the env.secrets since they have hostname expansion
    volumes:
      - ../data/grafana:/var/lib/grafana
    restart: always
    ports:
      - 8000:3000
    env_file:
      - ../env.production
      - env.production
      - ../data/grafana/env.secrets
@@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the grafana client connection

client-create grafana "$GRAFANA_HOSTNAME.$DOMAIN_NAME" "$GRAFANA_CLIENT_SECRET" </dev/null
@@ -0,0 +1,15 @@
apiVersion: 1

providers:
  - name: 'hackerspace-zone dashboards'
    orgId: 1
    folder: ''
    folderUid: ''
    type: file
    disableDeletion: false
    updateIntervalSeconds: 10
    allowUiUpdates: false
    options:
      path: /etc/grafana/dashboards
      foldersFromFilesStructure: true
@@ -0,0 +1,13 @@
apiVersion: 1
datasources:
  - name: Prometheus
    version: 2
    orgId: 1
    uid: 5qpBRfD4k
    type: prometheus
    access: proxy
    url: http://prometheus:9090
    basicAuth: false
    isDefault: true
    jsonData:
      httpMethod: POST
@@ -1,50 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"

BASE="https://$KEYCLOAK_HOSTNAME/realms/$REALM/protocol/openid-connect"
SECRETS="../data/grafana/env.secrets"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "grafana: unable to start container"
	exit 0
fi

docker-compose down 2>/dev/null

GRAFANA_CLIENT_SECRET="$(openssl rand -hex 32)"
GRAFANA_ADMIN_PASSWORD="$(openssl rand -hex 4)"

echo "Generating secrets: admin password $GRAFANA_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# Do not check in!
GF_SECURITY_ADMIN_PASSWORD=$GRAFANA_ADMIN_PASSWORD
GF_SERVER_ROOT_URL=https://$GRAFANA_HOSTNAME/
GF_SERVER_DOMAIN=$GRAFANA_HOSTNAME
GF_AUTH_GENERIC_OAUTH_AUTH_URL=$BASE/auth
GF_AUTH_GENERIC_OAUTH_TOKEN_URL=$BASE/token
GF_AUTH_GENERIC_OAUTH_API_URL=$BASE/userinfo
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=$GRAFANA_CLIENT_SECRET
EOF

../keycloak/client-delete 'grafana' 2>/dev/null

../keycloak/client-create << EOF || die "unable to create client id"
{
	"clientId": "grafana",
	"rootUrl": "https://$GRAFANA_HOSTNAME/",
	"adminUrl": "https://$GRAFANA_HOSTNAME/",
	"redirectUris": [ "https://$GRAFANA_HOSTNAME/*" ],
	"webOrigins": [ "https://$GRAFANA_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$GRAFANA_CLIENT_SECRET"
}
EOF

docker-compose up -d || die "grafana: unable to bring up container"
@@ -0,0 +1,58 @@
version: '3.9'
services:
  hedgedoc-db:
    image: postgres:13.4-alpine
    container_name: hedgedoc-db
    environment:
      - POSTGRES_USER=hedgedoc
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=hedgedoc
    volumes:
      - ./data/hedgedoc/database:/var/lib/postgresql/data
    restart: always

  hedgedoc:
    # Make sure to use the latest release from https://hedgedoc.org/latest-release
    image: quay.io/hedgedoc/hedgedoc:1.9.4
    container_name: hedgedoc
    environment:
      #- CMD_CSP_ENABLE=false
      - CMD_DB_URL=postgres://hedgedoc:password@hedgedoc-db:5432/hedgedoc
      - CMD_PROTOCOL_USESSL=true
      - CMD_ALLOW_ANONYMOUS=false # anonymous users can't create notes
      - CMD_ALLOW_ANONYMOUS_EDITS=true # but they can be invited to edit notes
      - CMD_ALLOW_FREEURL=true # users can create arbitrary names
      - CMD_EMAIL=false # only oauth logins
      - CMD_DOMAIN=${HEDGEDOC_HOSTNAME}.${DOMAIN_NAME}
      - CMD_OAUTH2_AUTHORIZATION_URL=${AUTH_URL}
      - CMD_OAUTH2_TOKEN_URL=${TOKEN_URL}
      - CMD_OAUTH2_USER_PROFILE_URL=${USERINFO_URL}
      - CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
      - CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
      - CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
      - CMD_OAUTH2_CLIENT_ID=hedgedoc
      - CMD_OAUTH2_PROVIDERNAME=Keycloak
      - CMD_OAUTH2_CLIENT_SECRET=${HEDGEDOC_CLIENT_SECRET}
      - CMD_SESSION_SECRET=${HEDGEDOC_SESSION_SECRET}
    env_file:
      - env.production
    volumes:
      - ./data/hedgedoc/uploads:/hedgedoc/public/uploads
    # ports:
    #   - "3000:3000"
    restart: always
    depends_on:
      - hedgedoc-db
      - keycloak

  # add the hedgedoc nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./hedgedoc/nginx.conf:/etc/nginx/templates/hedgedoc.conf.template:ro

  # add the hedgedoc client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/hedgedoc/secrets
    volumes:
      - ./hedgedoc/keycloak.sh:/keycloak-setup/hedgedoc.sh:ro
@@ -1,34 +0,0 @@
version: '3'
services:
  database:
    image: postgres:13.4-alpine
    environment:
      - POSTGRES_USER=hedgedoc
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=hedgedoc
    volumes:
      - ../data/hedgedoc/database:/var/lib/postgresql/data
    restart: always
  hedgedoc:
    # Make sure to use the latest release from https://hedgedoc.org/latest-release
    image: quay.io/hedgedoc/hedgedoc:1.9.3
    env_file:
      - ../env.production
      - env.production
      - ../data/hedgedoc/env.secrets
    environment:
      #- CMD_CSP_ENABLE=false
      - CMD_DB_URL=postgres://hedgedoc:password@database:5432/hedgedoc
      - CMD_PROTOCOL_USESSL=true
      - CMD_ALLOW_ANONYMOUS=false # anonymous user's can't create notes
      - CMD_ALLOW_ANONYMOUS_EDITS=true # but they can be invited to edit notes
      - CMD_ALLOW_FREEURL=true # users can create arbitrary names
      - CMD_EMAIL=false # only oauth logins
      # DOMAIN and OAUTH2 variables are now in env.secret
    volumes:
      - ../data/hedgedoc/uploads:/hedgedoc/public/uploads
    ports:
      - "3000:3000"
    restart: always
    depends_on:
      - database
@@ -1,2 +0,0 @@
CMD_OAUTH2_CLIENT_SECRET=abcdef1234
CMD_SESSION_SECRET=abcdef1234
@@ -0,0 +1,50 @@
#!/bin/bash -x
# Setup the hedgedoc client connection

# this might fail; we'll ignore it if we have already created it
# https://github.com/hedgedoc/hedgedoc/issues/56
kcadm.sh \
	create client-scopes \
	-r "$REALM" \
	-f - <<EOF || echo "whatever"
{
	"name": "id",
	"protocol": "openid-connect",
	"attributes": {
		"include.in.token.scope": "true",
		"display.on.consent.screen": "true"
	},
	"protocolMappers": [
		{
			"name": "id",
			"protocol": "openid-connect",
			"protocolMapper": "oidc-usermodel-property-mapper",
			"consentRequired": false,
			"config": {
				"user.attribute": "id",
				"id.token.claim": "true",
				"access.token.claim": "true",
				"jsonType.label": "String",
				"userinfo.token.claim": "true"
			}
		}
	]
}
EOF

client-create hedgedoc "$HEDGEDOC_HOSTNAME.$DOMAIN_NAME" "$HEDGEDOC_CLIENT_SECRET" <<EOF
,"defaultClientScopes": [
	"web-origins",
	"acr",
	"profile",
	"roles",
	"id",
	"email"
],
"optionalClientScopes": [
	"address",
	"phone",
	"offline_access",
	"microprofile-jwt"
]
EOF
@@ -1,69 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top levle env?"
source env.production || die "no local env?"

DATA="../data/hedgedoc"
SECRETS="$DATA/env.secrets"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "hedgedoc: unable to start"
	exit 0
fi

docker-compose down 2>/dev/null

# regenerate the client secrets
CLIENT_SECRET="$(openssl rand -hex 20)"
SESSION_SECRET="$(openssl rand -hex 20)"

mkdir -p "$DATA/uploads"
chmod 666 "$DATA/uploads"

cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
CMD_OAUTH2_CLIENT_SECRET=$CLIENT_SECRET
CMD_SESSION_SECRET=$SESSION_SECRET
CMD_DOMAIN=${HEDGEDOC_HOSTNAME}
CMD_OAUTH2_AUTHORIZATION_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/auth
CMD_OAUTH2_TOKEN_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/token
CMD_OAUTH2_USER_PROFILE_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/userinfo
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
CMD_OAUTH2_CLIENT_ID=hedgedoc
CMD_OAUTH2_PROVIDERNAME=Keycloak
EOF

../keycloak/client-delete hedgedoc

../keycloak/client-create <<EOF || die "unable to create hedgedoc client"
{
	"clientId": "hedgedoc",
	"rootUrl": "https://$HEDGEDOC_HOSTNAME",
	"adminUrl": "https://$HEDGEDOC_HOSTNAME",
	"redirectUris": [ "https://$HEDGEDOC_HOSTNAME/*" ],
	"webOrigins": [ "https://$HEDGEDOC_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$CLIENT_SECRET",
	"defaultClientScopes": [
		"web-origins",
		"acr",
		"profile",
		"roles",
		"id",
		"email"
	],
	"optionalClientScopes": [
		"address",
		"phone",
		"offline_access",
		"microprofile-jwt"
	]
}
EOF

docker-compose up -d || die "hedgedoc: unable to start container"
@@ -0,0 +1,68 @@
version: '3.9'
services:
  keycloak-db:
    image: mysql:5.7
    restart: always
    container_name: keycloak-db
    volumes:
      - ./data/keycloak/database:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: keycloak
      MYSQL_USER: keycloak
      MYSQL_PASSWORD: password

  keycloak:
    image: quay.io/keycloak/keycloak:18.0.0
    restart: always
    container_name: keycloak
    entrypoint: /opt/keycloak/bin/kc.sh start --hostname="$${KEYCLOAK_HOSTNAME}.$${DOMAIN_NAME}" --proxy=edge
    # healthcheck:
    #   test: ["CMD", "curl", "-f", "http://localhost:8080"]
    #   interval: 30s
    #   timeout: 10s
    #   retries: 3
    user: "0:0" # otherwise the persistent data directory is not writable
    env_file:
      - env.production
      - data/keycloak/secrets
    environment:
      DB_VENDOR: MYSQL
      DB_ADDR: keycloak-db
      DB_DATABASE: keycloak
      DB_USER: keycloak
      DB_PASSWORD: password
      KEYCLOAK_ADMIN: admin
      KEYCLOAK_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}
      PROXY_ADDRESS_FORWARDING: 'true'
      # KEYCLOAK_ADMIN_PASSWORD is set in env.secrets
    volumes:
      - ./data/keycloak/certs:/etc/x509/https
      - ./data/keycloak/keycloak:/opt/keycloak/data
    depends_on:
      - keycloak-db

  # all of the various subdomains can install files in
  # /keycloak-setup/ to be executed during the setup phase
  # to enable their clients using the client-create tool
  keycloak-setup:
    image: quay.io/keycloak/keycloak:18.0.0
    container_name: keycloak-setup
    profiles:
      - setup
    depends_on:
      - keycloak
    restart: never
    env_file:
      - env.production
      - data/keycloak/secrets
    entrypoint: /entrypoint.sh
    volumes:
      - ./keycloak/entrypoint-setup.sh:/entrypoint.sh:ro
      - ./keycloak/mail-setup.sh:/keycloak-setup/mail-setup.sh:ro
      - ./keycloak/client-create:/bin/client-create:ro

  # add the keycloak nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./keycloak/nginx.conf:/etc/nginx/templates/keycloak.conf.template:ro
@@ -1,7 +1,6 @@
 # Keycloak
 
 Keycloak is the single-sign-on user authentication provider.
-You must set the `KEYCLOAK_ADMIN_PASSWORD` in the `env.secrets` file.
-This is the most important secret: it allows user accounts to be created
-for all the other services.
+To log in and create the first account, use `admin` and
+the password stored in `data/keycloak/secrets`.
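To read that generated password on the host, something like the following should work; the variable name comes from the Makefile's data/%/secrets rule:

. data/keycloak/secrets
echo "$KEYCLOAK_ADMIN_PASSWORD"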
@@ -1,20 +1,34 @@
 #!/bin/bash
 die() { echo >&2 "$@" ; exit 1 ; }
 
-DIRNAME="$(dirname $0)"
-cd "$DIRNAME"
-
-source ../env.production || die "no top levle env?"
-source env.production || die "no local env?"
-source "../data/keycloak/env.secrets" || die "no local secrets?"
-
-docker-compose exec -T keycloak \
-	/opt/keycloak/bin/kcadm.sh \
-	create clients \
-	--server http://localhost:8080/ \
-	--user admin \
-	--realm master \
-	--password "$KEYCLOAK_ADMIN_PASSWORD" \
-	-r "$REALM" \
-	-f - \
-|| die "create client failed"
+client_name="$1"
+hostname="$2"
+secret="$3"
+
+client_id="$(kcadm.sh get clients \
+	-r "$REALM" \
+	--fields id \
+	-q clientId="$client_name" \
+	--format csv \
+	--noquotes \
+)"
+
+if [ -n "$client_id" ]; then
+	kcadm.sh delete "clients/$client_id" -r "$REALM" || die "$client_id: unable to delete"
+fi
+
+# remember to add a leading , if adding extra data
+extra="$(cat -)"
+
+kcadm.sh create clients -r "$REALM" -f - <<EOF || die "$client_id: unable to create"
+{
+	"clientId": "$client_name",
+	"rootUrl": "https://$hostname",
+	"adminUrl": "https://$hostname",
+	"redirectUris": [ "https://$hostname/*" ],
+	"webOrigins": [ "https://$hostname" ],
+	"clientAuthenticatorType": "client-secret",
+	"secret": "$secret"
+	$extra
+}
+EOF
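For comparison, the per-module wrappers added in this merge call the new helper like this (taken from the gitea wrapper; the hedgedoc wrapper additionally pipes extra JSON, starting with a leading comma, into stdin to set client scopes):

client-create gitea "$GITEA_HOSTNAME.$DOMAIN_NAME" "$GITEA_CLIENT_SECRET" </dev/null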
@@ -1,40 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"

source ../env.production || die "no top levle env?"
source env.production || die "no local env?"
source "../data/keycloak/env.secrets" || die "no local secrets?"

# try to get the clients by name
CLIENT_NAME="$1"
if [ -z "$CLIENT_NAME" ]; then
	die "usage: $0 clientName"
fi

CLIENT_ID="$(docker-compose exec -T keycloak \
	/opt/keycloak/bin/kcadm.sh \
	get clients \
	--server http://localhost:8080/ \
	--user admin \
	--password "$KEYCLOAK_ADMIN_PASSWORD" \
	--realm master \
	-r "$REALM" \
	| jq -r ".[] | select( .clientId == \"$CLIENT_NAME\" ).id")"

if [ -z "$CLIENT_ID" ]; then
	die "$CLIENT_NAME: no such client"
fi

echo "$0: $CLIENT_NAME = $CLIENT_ID"
docker-compose exec -T keycloak \
	/opt/keycloak/bin/kcadm.sh \
	delete "clients/$CLIENT_ID" \
	--server http://localhost:8080/ \
	--user admin \
	--realm master \
	--password "$KEYCLOAK_ADMIN_PASSWORD" \
	-r "$REALM" \
|| die "$CLIENT_NAME($CLIENT_ID): unable to remove"
@@ -1,43 +0,0 @@
version: '3'

volumes:
  mysql_data:
    driver: local

services:
  mysql:
    image: mysql:5.7
    restart: always
    volumes:
      - ../data/keycloak/database:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: keycloak
      MYSQL_USER: keycloak
      MYSQL_PASSWORD: password

  keycloak:
    image: quay.io/keycloak/keycloak:18.0.0
    restart: always
    entrypoint: /opt/keycloak/bin/kc.sh start --hostname="$${KEYCLOAK_HOSTNAME}" --proxy=edge
    user: "0:0" # otherwise the persistent data directory is not writable
    env_file:
      - ../env.production
      - env.production
      - ../data/keycloak/env.secrets
    environment:
      DB_VENDOR: MYSQL
      DB_ADDR: mysql
      DB_DATABASE: keycloak
      DB_USER: keycloak
      DB_PASSWORD: password
      KEYCLOAK_ADMIN: admin
      # KEYCLOAK_ADMIN_PASSWORD should be set in env.secrets
      PROXY_ADDRESS_FORWARDING: 'true'
    volumes:
      - ../data/keycloak/certs:/etc/x509/https
      - ../data/keycloak/keycloak:/opt/keycloak/data
    ports:
      - 8080:8080
    depends_on:
      - mysql
@@ -0,0 +1,19 @@
#!/bin/bash

export PATH=/opt/keycloak/bin:$PATH

# perform an authentication as admin so that all other scripts can
# use the cached credentials

kcadm.sh \
	config credentials \
	--server http://keycloak:8080/ \
	--user admin \
	--password "$KEYCLOAK_ADMIN_PASSWORD" \
	--realm master \
|| exit 1

for file in /keycloak-setup/* ; do
	echo >&2 "$file: running setup"
	$file || exit 1
done
@@ -0,0 +1,27 @@
#!/bin/bash

if [ -z "$SMTP_SERVER" ]; then
	exit 0
fi

echo >&2 "*** configuring email to use $SMTP_SERVER"
/opt/keycloak/bin/kcadm.sh update \
	"realms/$REALM" \
	-f - <<EOF || exit 1
{
	"resetPasswordAllowed": "true",
	"smtpServer" : {
		"auth" : "true",
		"starttls" : "true",
		"user" : "$SMTP_USER",
		"password" : "$SMTP_PASSWORD",
		"port" : "$SMTP_PORT",
		"host" : "$SMTP_SERVER",
		"from" : "keycloak@$DOMAIN_NAME",
		"fromDisplayName" : "Keycloak @ $DOMAIN_NAME",
		"ssl" : "false"
	}
}
EOF

exit 0
@@ -1,9 +1,9 @@
 server {
-	server_name login.${DOMAIN_NAME};
+	server_name ${KEYCLOAK_HOSTNAME} ${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME};
 	client_max_body_size 128m;
 
 	location / {
-		proxy_pass http://host.docker.internal:8080;
+		proxy_pass http://keycloak:8080;
 		proxy_pass_header Set-Cookie;
 		proxy_set_header Host $host;
 		proxy_set_header X-Forwarded-For $remote_addr;
@@ -1,119 +0,0 @@
#!/bin/bash
die() { echo >&2 "keycloak: ERROR: $@" ; exit 1 ; }
info() { echo >&2 "keycloak: $@" ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production
source ./env.production
source "../env.smtp" 2>/dev/null

SECRETS="../data/keycloak/env.secrets"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "keycloak: unable to start container"
	exit 0
fi

docker-compose down 2>/dev/null

KEYCLOAK_ADMIN_PASSWORD="$(openssl rand -hex 8)"
echo "Keycloak admin password $KEYCLOAK_ADMIN_PASSWORD"

mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
KEYCLOAK_ADMIN_PASSWORD=$KEYCLOAK_ADMIN_PASSWORD
EOF

docker-compose up -d || die "unable to start keycloak"
echo "sleeping a minute while keycloak initializes..."
sleep 30

info "logging into server"
docker-compose exec keycloak \
	/opt/keycloak/bin/kcadm.sh \
	config credentials \
	--server http://localhost:8080/ \
	--user admin \
	--password "$KEYCLOAK_ADMIN_PASSWORD" \
	--realm master \
|| die "unable to login"

info "Create a new realm for '$REALM'"
docker-compose exec keycloak \
	/opt/keycloak/bin/kcadm.sh \
	create realms \
	-s "realm=$REALM" \
	-s enabled=true \
|| die "unable to create realm"

# https://github.com/hedgedoc/hedgedoc/issues/56
info "Fix up a id bug"
docker-compose exec -T keycloak \
	/opt/keycloak/bin/kcadm.sh \
	create client-scopes \
	-r "$REALM" \
	-f - <<EOF || die "unable to create mapping"
{
	"name": "id",
	"protocol": "openid-connect",
	"attributes": {
		"include.in.token.scope": "true",
		"display.on.consent.screen": "true"
	},
	"protocolMappers": [
		{
			"name": "id",
			"protocol": "openid-connect",
			"protocolMapper": "oidc-usermodel-property-mapper",
			"consentRequired": false,
			"config": {
				"user.attribute": "id",
				"id.token.claim": "true",
				"access.token.claim": "true",
				"jsonType.label": "String",
				"userinfo.token.claim": "true"
			}
		}
	]
}
EOF

if [ -n "$SMTP_SERVER" ]; then
	info "configuring email"
	docker-compose exec -T keycloak \
		/opt/keycloak/bin/kcadm.sh update "realms/$REALM" \
		-f - <<EOF || die "unable to configure email"
{
	"resetPasswordAllowed": "true",
	"smtpServer" : {
		"auth" : "true",
		"starttls" : "true",
		"user" : "$SMTP_USER",
		"password" : "$SMTP_PASSWORD",
		"port" : "$SMTP_PORT",
		"host" : "$SMTP_SERVER",
		"from" : "keycloak@$DOMAIN_NAME",
		"fromDisplayName" : "Keycloak @ $DOMAIN_NAME",
		"ssl" : "false"
	}
}
EOF
fi

info "Create an admin user in realm"
docker-compose exec -T keycloak \
	/opt/keycloak/bin/kcadm.sh \
	create users \
	-o \
	--fields id,username \
	-r "$REALM" \
	-s username=admin \
	-s enabled=true \
	-s 'credentials=[{"type":"'$KEYCLOAK_ADMIN_PASSWORD'","value":"admin","temporary":false}]' \
|| die "$REALM: unable to create admin user"
@@ -0,0 +1,198 @@
version: '3'
services:
  mastodon-db:
    image: postgres:13.4-alpine
    restart: always
    container_name: mastodon-db
    #shm_size: 256mb
    # networks:
    #   - internal_network
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', "mastodon", "-d", "mastodon_production"]
    volumes:
      - ./data/mastodon/database:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mastodon
      - POSTGRES_PASSWORD=mastodon
      #- POSTGRES_DB=mastodon_production
    env_file:
      - mastodon/env.production

  mastodon-redis:
    image: redis:6-alpine
    restart: always
    container_name: mastodon-redis
    # networks:
    #   - internal_network
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
    env_file:
      - mastodon/env.production
    volumes:
      - ./data/mastodon/redis:/data

  mastodon-es:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    restart: always
    container_name: mastodon-es
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "cluster.name=es-mastodon"
      - "discovery.type=single-node"
      - "bootstrap.memory_lock=true"
    env_file:
      - mastodon/env.production
    # networks:
    #   - internal_network
    healthcheck:
      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
    volumes:
      - ./data/mastodon/elasticsearch:/usr/share/elasticsearch/data
    # fixup the permissions on the data directory since they are created as root on host
    entrypoint: ["/bin/sh", "-c", "chown -R elasticsearch:elasticsearch data && exec /usr/local/bin/docker-entrypoint.sh eswrapper"]
    ulimits:
      memlock:
        soft: -1
        hard: -1

  mastodon:
    image: tootsuite/mastodon
    container_name: mastodon
    restart: always
    #command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 6001"
    user: "0:0"
    command: ["/entrypoint.sh"]
    # networks:
    #   - external_network
    #   - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:6001/health || exit 1']
    # ports:
    #   - '6001:6001'
    depends_on:
      - mastodon-db
      - mastodon-redis
      - mastodon-es
    volumes:
      - ./data/mastodon/system:/mastodon/public/system
      - ./mastodon/entrypoint.sh:/entrypoint.sh:ro
    env_file:
      - mastodon/env.production
    environment:
      - WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
      - LOCAL_DOMAIN=$DOMAIN_NAME
      - OIDC_DISPLAY_NAME=$REALM
      - OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
      - OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
      - OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
      - SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
      - OTP_SECRET=${MASTODON_SESSION_SECRET}
      - SMTP_SERVER=$SMTP_SERVER
      - SMTP_PORT=$SMTP_PORT
      - SMTP_LOGIN=$SMTP_USER
      - SMTP_PASSWORD=$SMTP_PASSWORD
      - SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME

  mastodon-streaming:
    image: tootsuite/mastodon
    restart: always
    container_name: mastodon-streaming
    environment:
      - WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
      - LOCAL_DOMAIN=$DOMAIN_NAME
      - OIDC_DISPLAY_NAME=$REALM
      - OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
      - OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
      - OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
      - SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
      - OTP_SECRET=${MASTODON_SESSION_SECRET}
      - SMTP_SERVER=$SMTP_SERVER
      - SMTP_PORT=$SMTP_PORT
      - SMTP_LOGIN=$SMTP_USER
      - SMTP_PASSWORD=$SMTP_PASSWORD
      - SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
    env_file:
      - mastodon/env.production
    command: node ./streaming
    # networks:
    #   - external_network
    #   - internal_network
    volumes:
      - ./data/mastodon/system:/mastodon/public/system
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
    depends_on:
      - mastodon-db
      - mastodon-redis

  mastodon-sidekiq:
    image: tootsuite/mastodon
    restart: always
    container_name: mastodon-sidekiq
    env_file:
      - mastodon/env.production
    environment:
      - WEB_DOMAIN=$MASTODON_HOSTNAME.$DOMAIN_NAME
      - LOCAL_DOMAIN=$DOMAIN_NAME
      - OIDC_DISPLAY_NAME=$REALM
      - OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME.$DOMAIN_NAME/realms/$REALM
      - OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME.$DOMAIN_NAME/auth/auth/openid_connect/callback
      - OIDC_CLIENT_SECRET=${MASTODON_CLIENT_SECRET}
      - SECRET_KEY_BASE=${MASTODON_ADMIN_PASSWORD}
      - OTP_SECRET=${MASTODON_SESSION_SECRET}
      - SMTP_SERVER=$SMTP_SERVER
      - SMTP_PORT=$SMTP_PORT
      - SMTP_LOGIN=$SMTP_USER
      - SMTP_PASSWORD=$SMTP_PASSWORD
      - SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
    command: bundle exec sidekiq
    depends_on:
      - mastodon-db
      - mastodon-redis
    # networks:
    #   - external_network
    #   - internal_network
    volumes:
      - ./data/mastodon/system:/mastodon/public/system
    healthcheck:
      test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]

  ## Uncomment to enable federation with tor instances along with adding the following ENV variables
  ## http_proxy=http://privoxy:8118
  ## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
  # tor:
  #   image: sirboops/tor
  #   networks:
  #     - external_network
  #     - internal_network
  #
  # privoxy:
  #   image: sirboops/privoxy
  #   volumes:
  #     - ./priv-config:/opt/config
  #   networks:
  #     - external_network
  #     - internal_network

  # add the subdomain nginx configuration into the nginx volume
  # as well as the cache directory so that nginx can send files directly from it
  nginx:
    volumes:
      - ./mastodon/nginx.conf:/etc/nginx/templates/mastodon.conf.template:ro
      - ./data/mastodon/system/cache:/mastodon/system/cache:ro
      - ./data/mastodon/system/media_attachments:/mastodon/system/media_attachments:ro
      - ./data/mastodon/system/accounts:/mastodon/system/accounts:ro

  # add the subdomain client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/mastodon/secrets
    volumes:
      - ./mastodon/keycloak.sh:/keycloak-setup/mastodon.sh:ro

#networks:
#  external_network:
#  internal_network:
#    internal: true
@@ -1,4 +1,5 @@
 # Mastodon
 
 This is the vanilla version with Elastic Search and Single-Sign-On enabled.
-No other user accounts are allowed to join.
+No other user accounts are allowed to join; you must use the Keycloak
+server to create accounts and log in.
@@ -1,131 +0,0 @@
version: '3'
services:
  database:
    image: postgres:13.4-alpine
    restart: always
    #shm_size: 256mb
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', "mastodon", "-d", "mastodon_production"]
    volumes:
      - ../data/mastodon/database:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mastodon
      - POSTGRES_PASSWORD=mastodon
      #- POSTGRES_DB=mastodon_production

  redis:
    image: redis:6-alpine
    restart: always
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
    volumes:
      - ../data/mastodon/redis:/data

  es:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    restart: always
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "cluster.name=es-mastodon"
      - "discovery.type=single-node"
      - "bootstrap.memory_lock=true"
    networks:
      - internal_network
    healthcheck:
      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
    volumes:
      - ../data/mastodon/elasticsearch:/usr/share/elasticsearch/data
    # fixup the permissions on the data directory since they are created as root on host
    entrypoint: /bin/sh -c "chown -R elasticsearch:elasticsearch data && /usr/local/bin/docker-entrypoint.sh eswrapper"
    ulimits:
      memlock:
        soft: -1
        hard: -1

  mastodon:
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 6001"
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:6001/health || exit 1']
    ports:
      - '6001:6001'
    depends_on:
      - database
      - redis
      - es
    volumes:
      - ../data/mastodon/system:/mastodon/public/system

  streaming:
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    command: node ./streaming
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
    ports:
      - '4000:4000'
    depends_on:
      - database
      - redis

  sidekiq:
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    command: bundle exec sidekiq
    depends_on:
      - database
      - redis
    networks:
      - external_network
      - internal_network
    volumes:
      - ../data/mastodon/system:/mastodon/public/system
    healthcheck:
      test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]

  ## Uncomment to enable federation with tor instances along with adding the following ENV variables
  ## http_proxy=http://privoxy:8118
  ## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
  # tor:
  #   image: sirboops/tor
  #   networks:
  #     - external_network
  #     - internal_network
  #
  # privoxy:
  #   image: sirboops/privoxy
  #   volumes:
  #     - ./priv-config:/opt/config
  #   networks:
  #     - external_network
  #     - internal_network

networks:
  external_network:
  internal_network:
    internal: true
@@ -0,0 +1,32 @@
#!/bin/bash -x

rm -f /mastodon/tmp/pids/server.pid

export MASTODON_DIR=/mastodon/public/system
export VAPID_KEY="$MASTODON_DIR/vapid_key"
export DB_SETUP="$MASTODON_DIR/db_done"

chown -R mastodon:mastodon "$MASTODON_DIR"

exec su mastodon <<EOF

export PATH="$PATH:/opt/ruby/bin:/opt/node/bin:/opt/mastodon/bin"

if [ ! -r "$VAPID_KEY" ]; then
	rails mastodon:webpush:generate_vapid_key > "$VAPID_KEY" \
	|| exit 1
fi

. "$VAPID_KEY"

if [ ! -r "$DB_SETUP" ]; then
	rails db:setup \
	|| exit 1

	touch "$DB_SETUP"
fi

exec bundle exec rails s -p 6001
EOF
@@ -0,0 +1,3 @@
#!/bin/bash -x

client-create mastodon "$MASTODON_HOSTNAME.$DOMAIN_NAME" "$MASTODON_CLIENT_SECRET" </dev/null
@ -0,0 +1,154 @@
map $http_upgrade $connection_upgrade {
	default upgrade;
	'' close;
}

upstream mastodon-backend {
	server mastodon:6001 fail_timeout=0;
}

upstream mastodon-streaming {
	server mastodon-streaming:4000 fail_timeout=0;
}

proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=CACHE:10m inactive=7d max_size=1g;

server {
	listen 443 ssl http2;
	server_name ${MASTODON_HOSTNAME} ${MASTODON_HOSTNAME}.${DOMAIN_NAME};

	ssl_protocols TLSv1.2 TLSv1.3;
	ssl_ciphers HIGH:!MEDIUM:!LOW:!aNULL:!NULL:!SHA;
	ssl_prefer_server_ciphers on;
	ssl_session_cache shared:SSL:10m;
	ssl_session_tickets off;
	include /etc/nginx/includes/challenge.conf;

	# Uncomment these lines once you acquire a certificate:
	# ssl_certificate     /etc/letsencrypt/live/example.com/fullchain.pem;
	# ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;

	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;

	keepalive_timeout 70;
	sendfile on;
	client_max_body_size 80m;

	root /mastodon;

	gzip on;
	gzip_disable "msie6";
	gzip_vary on;
	gzip_proxied any;
	gzip_comp_level 6;
	gzip_buffers 16 8k;
	gzip_http_version 1.1;
	gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript image/svg+xml image/x-icon;

	location / {
		try_files $uri @proxy;
	}

	# If Docker is used for deployment and Rails serves static files,
	# then the line `try_files $uri =404;` must be replaced with `try_files $uri @proxy;`.
	location = /sw.js {
		add_header Cache-Control "public, max-age=604800, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		#try_files $uri =404;
		try_files $uri @proxy;
	}

	location ~ ^/assets/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/avatars/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/emoji/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/headers/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/packs/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/shortcuts/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/sounds/ {
		add_header Cache-Control "public, max-age=2419200, must-revalidate";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ~ ^/system/ {
		add_header Cache-Control "public, max-age=2419200, immutable";
		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";
		try_files $uri @proxy;
	}

	location ^~ /api/v1/streaming {
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
		proxy_set_header Proxy "";

		proxy_pass http://mastodon-streaming;
		proxy_buffering off;
		proxy_redirect off;
		proxy_http_version 1.1;
		proxy_set_header Upgrade $http_upgrade;
		proxy_set_header Connection $connection_upgrade;

		add_header Strict-Transport-Security "max-age=63072000; includeSubDomains";

		tcp_nodelay on;
	}

	location @proxy {
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
		proxy_set_header Proxy "";
		proxy_pass_header Server;

		proxy_pass http://mastodon-backend;
		proxy_buffering on;
		proxy_redirect off;
		proxy_http_version 1.1;
		proxy_set_header Upgrade $http_upgrade;
		proxy_set_header Connection $connection_upgrade;

		proxy_cache CACHE;
		proxy_cache_valid 200 7d;
		proxy_cache_valid 410 24h;
		proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504;
		add_header X-Cached $upstream_cache_status;

		tcp_nodelay on;
	}

	error_page 404 500 501 502 503 504 /500.html;
}
@ -1,78 +0,0 @@
#!/bin/bash
die() { echo >&2 "ERROR: $@" ; exit 1 ; }
info() { echo >&2 "$@" ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production
source ./env.production
source "../env.smtp" 2>/dev/null

mkdir -p ../data/mastodon/system
chmod 777 ../data/mastodon/system

SECRETS="../data/mastodon/env.secrets"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "unable to restart mastodon"
	exit 0
fi

# have to bring it all down before we touch the files
docker-compose down

OIDC_CLIENT_SECRET="$(openssl rand -hex 32)"

# create the secrets file,
# along with some parameters that should be in the environment
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
WEB_DOMAIN=$MASTODON_HOSTNAME
LOCAL_DOMAIN=$DOMAIN_NAME
OIDC_DISPLAY_NAME=$REALM
OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME/realms/$REALM
OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME/auth/auth/openid_connect/callback
OIDC_CLIENT_SECRET=$OIDC_CLIENT_SECRET
SECRET_KEY_BASE=$(openssl rand -hex 32)
OTP_SECRET=$(openssl rand -hex 32)
EOF

if [ -n "$SMTP_SERVER" ]; then
	cat <<EOF >> "$SECRETS"
SMTP_SERVER=$SMTP_SERVER
SMTP_PORT=$SMTP_PORT
SMTP_LOGIN=$SMTP_USER
SMTP_PASSWORD=$SMTP_PASSWORD
SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
EOF
fi

info "mastodon: creating push keys"
docker-compose run --rm mastodon \
	rails mastodon:webpush:generate_vapid_key \
	>> "$SECRETS" \
	|| die "unable to generate vapid key"

info "mastodon: setting up database"
docker-compose run --rm mastodon \
	rails db:setup \
	|| die "unable to login"

source "$SECRETS"

info "mastodon: creating keycloak interface"
../keycloak/client-delete mastodon
../keycloak/client-create <<EOF || die "Unable to create keycloak client"
{
	"clientId": "mastodon",
	"rootUrl": "https://$MASTODON_HOSTNAME/",
	"adminUrl": "https://$MASTODON_HOSTNAME/",
	"redirectUris": [ "https://$MASTODON_HOSTNAME/*" ],
	"webOrigins": [ "https://$MASTODON_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$OIDC_CLIENT_SECRET"
}
EOF

docker-compose up -d || die "mastodon: unable to start container"
@ -0,0 +1,56 @@
version: '3'
services:
  # the default synapse setup uses a sqlite database; this should be fixed at some point
  # matrix-db:
  #   image: postgres:13.4-alpine
  #   restart: unless-stopped
  #   volumes:
  #     - ./data/matrix/db:/var/lib/postgresql/data
  #   environment:
  #     - POSTGRES_DB=synapse
  #     - POSTGRES_USER=synapse
  #     - POSTGRES_PASSWORD=STRONGPASSWORD

  matrix-element:
    image: vectorim/element-web:latest
    restart: unless-stopped
    container_name: matrix-element
    env_file:
      - env.production
    volumes:
      - ./matrix/10-envsubst-config.sh:/docker-entrypoint.d/10-envsubst-config.sh:ro
      - ./matrix/config.sample.json:/app/config.sample.json:ro
    depends_on:
      - matrix-synapse
    # ports:
    #   - "5000:80"

  matrix-synapse:
    image: matrixdotorg/synapse:latest
    restart: unless-stopped
    container_name: matrix-synapse
    volumes:
      - ./data/matrix/synapse:/data
      - ./matrix/entrypoint-synapse.sh:/entrypoint.sh:ro
    entrypoint: ["/entrypoint.sh"]
    env_file:
      - env.production
    environment:
      - MATRIX_CLIENT_SECRET=${MATRIX_CLIENT_SECRET}
    depends_on:
      - keycloak
      - nginx
    # ports:
    #   - "5008:8008"

  # add the nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./matrix/nginx.conf:/etc/nginx/templates/matrix.conf.template:ro

  # add the client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/matrix/secrets
    volumes:
      - ./matrix/keycloak.sh:/keycloak-setup/matrix.sh:ro
@ -0,0 +1,5 @@
#!/bin/sh

echo >&2 "**** Configuring for $DOMAIN_NAME"
envsubst < /app/config.sample.json > /app/config.json
head /app/config.json
@ -0,0 +1,3 @@
# Matrix/Element chat

The sample `config.sample.json` comes from commit 8891698745897388db037ea8692937edc199630c of vector-im/element-web.
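The placeholders in the sample config (`${MATRIX_HOSTNAME}`, `${DOMAIN_NAME}`) are filled in at container start by `10-envsubst-config.sh`, which the compose file mounts into the element-web image's `/docker-entrypoint.d/`. A minimal sketch of that substitution, using hypothetical values:

```
# Hypothetical values; in the real setup these come from env.production.
export MATRIX_HOSTNAME=matrix DOMAIN_NAME=example.com
# envsubst replaces ${MATRIX_HOSTNAME} and ${DOMAIN_NAME} in the template
envsubst < config.sample.json > config.json
head config.json    # quick sanity check of the generated config
```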
@ -0,0 +1,53 @@
{
    "default_server_config": {
        "m.homeserver": {
            "base_url": "https://${MATRIX_HOSTNAME}.${DOMAIN_NAME}",
            "server_name": "${DOMAIN_NAME}"
        },
        "m.identity_server": {
            "base_url": "https://vector.im"
        }
    },
    "disable_custom_urls": false,
    "disable_guests": false,
    "disable_login_language_selector": false,
    "disable_3pid_login": false,
    "brand": "Element",
    "integrations_ui_url": "https://scalar.vector.im/",
    "integrations_rest_url": "https://scalar.vector.im/api",
    "integrations_widgets_urls": [
        "https://scalar.vector.im/_matrix/integrations/v1",
        "https://scalar.vector.im/api",
        "https://scalar-staging.vector.im/_matrix/integrations/v1",
        "https://scalar-staging.vector.im/api",
        "https://scalar-staging.riot.im/scalar/api"
    ],
    "bug_report_endpoint_url": "https://element.io/bugreports/submit",
    "uisi_autorageshake_app": "element-auto-uisi",
    "default_country_code": "GB",
    "show_labs_settings": false,
    "features": { },
    "default_federate": true,
    "default_theme": "light",
    "room_directory": {
        "servers": [
            "matrix.org"
        ]
    },
    "enable_presence_by_hs_url": {
        "https://matrix.org": false,
        "https://matrix-client.matrix.org": false
    },
    "setting_defaults": {
        "breadcrumbs": true
    },
    "jitsi": {
        "preferred_domain": "meet.element.io"
    },
    "element_call": {
        "url": "https://call.element.io",
        "participant_limit": 8,
        "brand": "Element Call"
    },
    "map_style_url": "https://api.maptiler.com/maps/streets/style.json?key=fU3vlMsMn4Jb6dnEIFsx"
}
@ -1,27 +0,0 @@
version: '3'
services:
  postgres:
    image: postgres:13.4-alpine
    restart: unless-stopped
    volumes:
      - ../data/matrix/postgresdata:/var/lib/postgresql/data
    environment:
      - POSTGRES_DB=synapse
      - POSTGRES_USER=synapse
      - POSTGRES_PASSWORD=STRONGPASSWORD

  element:
    image: vectorim/element-web:latest
    restart: unless-stopped
    volumes:
      - ../data/matrix/element-config.json:/app/config.json
    ports:
      - "5000:80"

  synapse:
    image: matrixdotorg/synapse:latest
    restart: unless-stopped
    volumes:
      - ../data/matrix/synapse:/data
    ports:
      - "5008:8008"
@ -1,73 +0,0 @@
{
    "default_server_config": {
        "m.homeserver": {
            "base_url": "https://${MATRIX_HOSTNAME}",
            "server_name": "${DOMAIN_NAME}"
        },
        "m.identity_server": {
            "base_url": "https://vector.im"
        }
    },
    "brand": "Element",
    "integrations_ui_url": "https://scalar.vector.im/",
    "integrations_rest_url": "https://scalar.vector.im/api",
    "integrations_widgets_urls": [
        "https://scalar.vector.im/_matrix/integrations/v1",
        "https://scalar.vector.im/api",
        "https://scalar-staging.vector.im/_matrix/integrations/v1",
        "https://scalar-staging.vector.im/api",
        "https://scalar-staging.riot.im/scalar/api"
    ],
    "hosting_signup_link": "https://element.io/matrix-services?utm_source=element-web&utm_medium=web",
    "bug_report_endpoint_url": "https://element.io/bugreports/submit",
    "uisi_autorageshake_app": "element-auto-uisi",
    "showLabsSettings": true,
    "piwik": {
        "url": "https://piwik.riot.im/",
        "siteId": 1,
        "policyUrl": "https://element.io/cookie-policy"
    },
    "roomDirectory": {
        "servers": [
            "matrix.org",
            "gitter.im",
            "libera.chat"
        ]
    },
    "enable_presence_by_hs_url": {
        "https://matrix.org": false,
        "https://matrix-client.matrix.org": false
    },
    "terms_and_conditions_links": [
        {
            "url": "https://element.io/privacy",
            "text": "Privacy Policy"
        },
        {
            "url": "https://element.io/cookie-policy",
            "text": "Cookie Policy"
        }
    ],
    "hostSignup": {
        "brand": "Element Home",
        "cookiePolicyUrl": "https://element.io/cookie-policy",
        "domains": [
            "matrix.org"
        ],
        "privacyPolicyUrl": "https://element.io/privacy",
        "termsOfServiceUrl": "https://element.io/terms-of-service",
        "url": "https://ems.element.io/element-home/in-app-loader"
    },
    "sentry": {
        "dsn": "https://029a0eb289f942508ae0fb17935bd8c5@sentry.matrix.org/6",
        "environment": "develop"
    },
    "posthog": {
        "projectApiKey": "phc_Jzsm6DTm6V2705zeU5dcNvQDlonOR68XvX2sh1sEOHO",
        "apiHost": "https://posthog.element.io"
    },
    "features": {
        "feature_spotlight": true
    },
    "map_style_url": "https://api.maptiler.com/maps/streets/style.json?key=fU3vlMsMn4Jb6dnEIFsx"
}
@ -0,0 +1,68 @@
#!/bin/bash
# This is the custom startup script for the synapse server

# fix up the Element client config to have the correct hostname
# based on the environment variables
#export DOMAIN_NAME MATRIX_HOSTNAME
#envsubst < "element-config.json.template" > "$DATA/element-config.json"

HOMESERVER_YAML="/data/homeserver.yaml"

if [ ! -r "$HOMESERVER_YAML" ]; then
	echo >&2 "***** Configuring the home server for $DOMAIN_NAME *****"

	export SYNAPSE_SERVER_NAME="$DOMAIN_NAME"
	export SYNAPSE_REPORT_STATS="no"

	/start.py generate \
	|| exit 1

	echo >&2 "***** Adding OIDC provider *****"
	cat <<EOF >> "$HOMESERVER_YAML"
#
# added by hackerspace-zone setup scripts
#
enable_metrics: true
suppress_key_server_warning: true
web_client_location: https://${MATRIX_HOSTNAME}.${DOMAIN_NAME}
public_baseurl: https://${MATRIX_HOSTNAME}.${DOMAIN_NAME}
oidc_providers:
  - idp_id: keycloak
    idp_name: "Keycloak"
    issuer: "https://${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}/realms/${REALM}"
    client_id: "matrix"
    client_secret: "${MATRIX_CLIENT_SECRET}"
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
EOF

	# enable prometheus metrics on a local-only port
	# since this is in docker, it won't be exposed to any outside connections
	echo >&2 "**** Enabling prometheus metrics ****"
	sed -i -e '/^listeners:/a\  - port: 9000\n    type: metrics\n' "$HOMESERVER_YAML"

fi

if ! grep -q '^  smtp_host:' "$HOMESERVER_YAML" && [ -n "$SMTP_SERVER" ]; then
	echo >&2 "***** Adding SMTP setup to yaml"
	cat <<EOF >> "$HOMESERVER_YAML"
#
# added by hackerspace-zone setup scripts
#
email:
  smtp_host: ${SMTP_SERVER}
  smtp_port: ${SMTP_PORT}
  smtp_user: "${SMTP_USER}"
  smtp_pass: "${SMTP_PASSWORD}"
  require_transport_security: true
  notif_from: "%(app)s matrix homeserver <noreply@${DOMAIN_NAME}>"
  app_name: ${DOMAIN_NAME}
EOF
fi

# hack to let keycloak startup
sleep 5
exec /start.py
@ -1 +0,0 @@
# variables
@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the OAuth client connection

client-create matrix "$MATRIX_HOSTNAME.$DOMAIN_NAME" "$MATRIX_CLIENT_SECRET" </dev/null
@ -0,0 +1,71 @@
map $http_upgrade $connection_upgrade {
	default upgrade;
	'' close;
}

server {
	server_name ${MATRIX_HOSTNAME} ${MATRIX_HOSTNAME}.${DOMAIN_NAME};
	client_max_body_size 128m;

	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	#include /etc/nginx/mime.types;
	#default_type application/octet-stream;

	gzip on;
	gzip_disable "msie6";

	proxy_read_timeout 1800s;

	# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
	chunked_transfer_encoding on;

	location / {
		proxy_pass http://matrix-element:80;
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
	}

	location ~ ^(/_matrix|/_synapse/client) {
		# note: do not add a path (even a single /) after the port in `proxy_pass`,
		# otherwise nginx will canonicalise the URI and cause signature verification
		# errors.
		proxy_pass http://matrix-synapse:8008;
		proxy_set_header X-Forwarded-For $remote_addr;
		proxy_set_header X-Forwarded-Proto $scheme;
		proxy_set_header Host $host;

		# Nginx by default only allows file uploads up to 1M in size
		# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
		client_max_body_size 50M;
	}

	# serve the static content for the well known files
	location /.well-known/matrix/server {
		default_type application/json;
		return 200 '{"m.server": "${MATRIX_HOSTNAME}.${DOMAIN_NAME}:443"}';
	}

	location /.well-known/matrix/client {
		default_type application/json;
		return 200 '{"m.homeserver":{"base_url": "https://${MATRIX_HOSTNAME}.${DOMAIN_NAME}"}}';
	}

	# The federation port is also enabled, although it can also go through 443
	listen 8448 ssl http2 default_server;
	#listen [::]:8448 ssl http2 default_server;

	# For the user connection
	listen 443 ssl http2;

	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	include /etc/nginx/includes/options-ssl-nginx.conf;
	include /etc/nginx/includes/challenge.conf;
	ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}
@ -1,88 +0,0 @@
#!/bin/bash
die() { echo >&2 "matrix: ERROR $@" ; exit 1 ; }
info() { echo >&2 "matrix: $@" ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top levle env?"
source ../env.smtp 2>/dev/null
source env.production || die "no local env?"

DATA="../data/matrix"
SYNAPSE_DIR="$DATA/synapse"
HOMESERVER_YAML="$SYNAPSE_DIR/homeserver.yaml"
if [ -r "$HOMESERVER_YAML" ]; then
	docker-compose up -d || die "matrix: unable to restart"
	exit 0
fi

docker-compose down 2>/dev/null
mkdir -p "$DATA"

# fix up the Element client config to have the correct hostname
# based on the environment variables
export DOMAIN_NAME MATRIX_HOSTNAME
envsubst < "element-config.json.template" > "$DATA/element-config.json"


# This will create a *delegated* matrix server,
# where the "servername" is just the top level domain,
# but it is hosted on "matrix.DOMAIN_NAME".
# the syntax here is confusing and it is not clear in
# the docs *which* have to be updated.
docker-compose run \
	--rm \
	-e SYNAPSE_SERVER_NAME="$DOMAIN_NAME" \
	-e SYNAPSE_REPORT_STATS="no" \
	synapse generate \
	|| die "unable to generate synapse config"

MATRIX_CLIENT_SECRET="$(openssl rand -hex 20)"

cat <<EOF >> "$HOMESERVER_YAML"
web_client_location: https://${MATRIX_HOSTNAME}/
public_baseurl: https://${MATRIX_HOSTNAME}/
oidc_providers:
  - idp_id: keycloak
    idp_name: "KeyCloak"
    issuer: "https://${KEYCLOAK_HOSTNAME}/realms/${REALM}"
    client_id: "synapse"
    client_secret: "${MATRIX_CLIENT_SECRET}"
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
EOF

if [ -n "$SMTP_SERVER" ]; then
	info "configuring email"
	cat <<EOF >> "$HOMESERVER_YAML"
email:
  smtp_host: ${SMTP_SERVER}
  smtp_port: ${SMTP_PORT}
  smtp_user: "${SMTP_USER}"
  smtp_pass: "${SMTP_PASSWORD}"
  require_transport_security: true
  notif_from: "%(app)s matrix homeserver <noreply@${DOMAIN_NAME}>"
  app_name: ${DOMAIN_NAME}
EOF
fi


../keycloak/client-delete 'synapse' 2>/dev/null

../keycloak/client-create << EOF || die "unable to create client id"
{
	"clientId": "synapse",
	"rootUrl": "https://$MATRIX_HOSTNAME/",
	"adminUrl": "https://$MATRIX_HOSTNAME/",
	"redirectUris": [ "https://$MATRIX_HOSTNAME/*" ],
	"webOrigins": [ "https://$MATRIX_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$MATRIX_CLIENT_SECRET"
}
EOF


docker-compose up -d || die "matrix: unable to start container"
@ -0,0 +1,62 @@
version: "3"

services:
  mobilizon:
    image: framasoft/mobilizon
    container_name: mobilizon
    restart: always
    volumes:
      - ./data/mobilizon/uploads:/var/lib/mobilizon/uploads
      - ./mobilizon/config.exs:/etc/mobilizon/config.exs:ro
    environment:
      - KEYCLOAK_HOSTNAME=${KEYCLOAK_HOSTNAME}.${DOMAIN_NAME}
      - REALM=${REALM}
      - MOBILIZON_INSTANCE_NAME=${MOBILIZON_HOSTNAME}.${DOMAIN_NAME}
      - MOBILIZON_INSTANCE_HOST=${MOBILIZON_HOSTNAME}.${DOMAIN_NAME}
      - MOBILIZON_INSTANCE_SECRET_KEY_BASE=${MOBILIZON_ADMIN_PASSWORD}
      - MOBILIZON_INSTANCE_SECRET_KEY=${MOBILIZON_SESSION_SECRET}
      - MOBILIZON_CLIENT_SECRET=${MOBILIZON_CLIENT_SECRET}
      - MOBILIZON_INSTANCE_EMAIL=events@${DOMAIN_NAME}
      - MOBILIZON_REPLY_EMAIL=noreply@${DOMAIN_NAME}
      - MOBILIZON_SMTP_SERVER=${SMTP_SERVER}
      - MOBILIZON_SMTP_PORT=${SMTP_PORT}
      - MOBILIZON_SMTP_USERNAME=${SMTP_USER}
      - MOBILIZON_SMTP_PASSWORD=${SMTP_PASSWORD}
      - MOBILIZON_SMTP_SSL=true
      - MOBILIZON_DATABASE_USERNAME=mobilizon
      - MOBILIZON_DATABASE_PASSWORD=mobilizon
      - MOBILIZON_DATABASE_DBNAME=mobilizon
      - MOBILIZON_DATABASE_HOST=mobilizon-db
      - MOBILIZON_INSTANCE_REGISTRATIONS_OPEN=false
      - MOBILIZON_INSTANCE_PORT=7000
    user: root
    entrypoint:
      - "/bin/sh"
      - "-c"
      - "chmod 777 /var/lib/mobilizon/uploads && exec su -p nobody -s /bin/sh /docker-entrypoint.sh"

    # ports:
    #   - "7000:7000"

  mobilizon-db:
    image: postgis/postgis:13-3.1
    container_name: mobilizon-db
    restart: always
    volumes:
      - ./data/mobilizon/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mobilizon
      - POSTGRES_PASSWORD=mobilizon
      - POSTGRES_DB=mobilizon

  # add the nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./mobilizon/nginx.conf:/etc/nginx/templates/mobilizon.conf.template:ro

  # add the client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/mobilizon/secrets
    volumes:
      - ./mobilizon/keycloak.sh:/keycloak-setup/mobilizon.sh:ro
@ -1,26 +0,0 @@
version: "3"

services:
  mobilizon:
    image: framasoft/mobilizon
    restart: always
    env_file:
      - ../env.production
      - ./env.production
      - ../data/mobilizon/env.secrets
    volumes:
      - ../data/mobilizon/uploads:/var/lib/mobilizon/uploads
      - ./config.exs:/etc/mobilizon/config.exs:ro
      # - ${PWD}/GeoLite2-City.mmdb:/var/lib/mobilizon/geo_db/GeoLite2-City.mmdb
    ports:
      - "7000:7000"

  db:
    image: postgis/postgis:13-3.1
    restart: always
    volumes:
      - ../data/mobilizon/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mobilizon
      - POSTGRES_PASSWORD=mobilizon
      - POSTGRES_DB=mobilizon
@ -1,24 +0,0 @@
# Database settings
POSTGRES_USER=mobilizon
POSTGRES_PASSWORD=changethis
POSTGRES_DB=mobilizon
MOBILIZON_DATABASE_USERNAME=mobilizon
MOBILIZON_DATABASE_PASSWORD=mobilizon
MOBILIZON_DATABASE_DBNAME=mobilizon
MOBILIZON_DATABASE_HOST=db


# Instance configuration
MOBILIZON_INSTANCE_REGISTRATIONS_OPEN=false
MOBILIZON_INSTANCE_PORT=7000

MOBILIZON_INSTANCE_EMAIL=noreply@mobilizon.lan
MOBILIZON_REPLY_EMAIL=contact@mobilizon.lan

# Email settings
MOBILIZON_SMTP_SERVER=localhost
MOBILIZON_SMTP_PORT=25
MOBILIZON_SMTP_HOSTNAME=localhost
MOBILIZON_SMTP_USERNAME=noreply@mobilizon.lan
MOBILIZON_SMTP_PASSWORD=password
MOBILIZON_SMTP_SSL=false
@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the OAuth client connection

client-create mobilizon "$MOBILIZON_HOSTNAME.$DOMAIN_NAME" "$MOBILIZON_CLIENT_SECRET" </dev/null
@ -1,62 +0,0 @@
#!/bin/bash
die() { echo >&2 "mobilizon: $@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
source ../env.smtp 2>/dev/null

DATA="../data/mobilizon"
SECRETS="$DATA/env.secrets"

if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "unable to start"
	exit 0
fi

docker-compose down 2>/dev/null

CLIENT_SECRET="$(openssl rand -hex 20)"

mkdir -p "$DATA/uploads"
chmod 777 "$DATA/uploads"

mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
MOBILIZON_INSTANCE_NAME=${DOMAIN_NAME}
MOBILIZON_INSTANCE_HOST=${MOBILIZON_HOSTNAME}
MOBILIZON_INSTANCE_SECRET_KEY_BASE=$(openssl rand -hex 20)
MOBILIZON_INSTANCE_SECRET_KEY=$(openssl rand -hex 20)
MOBILIZON_CLIENT_SECRET=${CLIENT_SECRET}
EOF

if [ -n "$SMTP_SERVER" ]; then
	cat <<EOF >> "$SECRETS"
MOBILIZON_INSTANCE_EMAIL=events@${DOMAIN_NAME}
MOBILIZON_REPLY_EMAIL=noreply@${DOMAIN_NAME}
MOBILIZON_SMTP_SERVER=${SMTP_SERVER}
MOBILIZON_SMTP_PORT=${SMTP_PORT}
MOBILIZON_SMTP_USERNAME=${SMTP_USER}
MOBILIZON_SMTP_PASSWORD=${SMTP_PASSWORD}
EOF
fi

../keycloak/client-delete mobilizon

../keycloak/client-create <<EOF || die "unable to create client"
{
	"clientId": "mobilizon",
	"rootUrl": "https://$MOBILIZON_HOSTNAME",
	"adminUrl": "https://$MOBILIZON_HOSTNAME",
	"redirectUris": [ "https://$MOBILIZON_HOSTNAME/*" ],
	"webOrigins": [ "https://$MOBILIZON_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$CLIENT_SECRET"
}
EOF

docker-compose up -d || die "unable to start container"
@ -0,0 +1,48 @@
version: "3"

services:
  nextcloud-db:
    image: postgres:13.4-alpine
    container_name: nextcloud-db
    restart: always
    environment:
      - POSTGRES_USER=nextcloud
      - POSTGRES_PASSWORD=nextcloud
      - POSTGRES_DB=nextcloud
    volumes:
      - ./data/nextcloud/database:/var/lib/postgresql/data

  nextcloud:
    image: nextcloud:25.0.1-apache
    container_name: nextcloud
    restart: always
    env_file:
      - env.production
    environment:
      POSTGRES_HOST: nextcloud-db
      POSTGRES_DB: nextcloud
      POSTGRES_USER: nextcloud
      POSTGRES_PASSWORD: nextcloud
      OVERWRITEPROTOCOL: https
      NEXTCLOUD_ADMIN_USER: admin
      NEXTCLOUD_ADMIN_PASSWORD: ${NEXTCLOUD_ADMIN_PASSWORD}
      NEXTCLOUD_CLIENT_SECRET: ${NEXTCLOUD_CLIENT_SECRET}
      NEXTCLOUD_TRUSTED_DOMAINS: ${NEXTCLOUD_HOSTNAME}.${DOMAIN_NAME}
    volumes:
      - ./data/nextcloud/nextcloud:/var/www/html
      - ./nextcloud/setup.sh:/setup.sh:ro
    depends_on:
      - nextcloud-db
    entrypoint: ["/setup.sh"]

  # add the nginx configuration into the nginx volume
  nginx:
    volumes:
      - ./nextcloud/nginx.conf:/etc/nginx/templates/nextcloud.conf.template:ro

  # add the nextcloud client secrets to the keycloak-setup volume
  keycloak-setup:
    env_file:
      - data/nextcloud/secrets
    volumes:
      - ./nextcloud/keycloak.sh:/keycloak-setup/nextcloud.sh:ro
@ -1,36 +0,0 @@
version: "3"

services:
  database:
    image: postgres:13.4-alpine
    restart: always
    environment:
      - POSTGRES_USER=nextcloud
      - POSTGRES_PASSWORD=nextcloud
      - POSTGRES_DB=nextcloud
    volumes:
      - ../data/nextcloud/database:/var/lib/postgresql/data

  nextcloud:
    image: nextcloud:23.0.4
    restart: always
    ports:
      - 9000:80
    env_file:
      - ../env.production
      - env.production
      - ../data/nextcloud/env.secrets
    environment:
      POSTGRES_HOST: database
      POSTGRES_DB: nextcloud
      POSTGRES_USER: nextcloud
      POSTGRES_PASSWORD: nextcloud
      OVERWRITEPROTOCOL: https
      NEXTCLOUD_ADMIN_USER: admin
      # NEXTCLOUD_ADMIN_PASSWORD in env.secrets
      # NEXTCLOUD_TRUSTED_DOMAINS also set in env.secrets
    volumes:
      - ../data/nextcloud/nextcloud:/var/www/html
    depends_on:
      - database
@ -1 +0,0 @@
# non-secret nextcloud config
@ -0,0 +1,4 @@
#!/bin/bash -x
# Setup the OAuth client connection

client-create nextcloud "$NEXTCLOUD_HOSTNAME.$DOMAIN_NAME" "$NEXTCLOUD_CLIENT_SECRET" </dev/null
@ -1,82 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"

SECRETS="../data/nextcloud/env.secrets"
if [ -r "$SECRETS" ]; then
	docker-compose up -d || die "nextcloud: unable to start"
	exit 0
fi

docker-compose down 2>/dev/null

NEXTCLOUD_CLIENT_SECRET="$(openssl rand -hex 32)"
NEXTCLOUD_ADMIN_PASSWORD="$(openssl rand -hex 6)"

echo "Generating secrets: admin password $NEXTCLOUD_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# Do not check in!
NEXTCLOUD_ADMIN_PASSWORD=$NEXTCLOUD_ADMIN_PASSWORD
NEXTCLOUD_TRUSTED_DOMAINS=$NEXTCLOUD_HOSTNAME
NEXTCLOUD_CLIENT_SECRET=$NEXTCLOUD_CLIENT_SECRET
EOF

BASE="https://$KEYCLOAK_HOSTNAME/realms/$REALM/protocol/openid-connect"
PROVIDER="$(jq -c . <<EOF
{
	"custom_oidc": [
		{
			"name": "keycloak",
			"title": "Keycloak",
			"clientId": "nextcloud",
			"clientSecret": "$NEXTCLOUD_CLIENT_SECRET",
			"authorizeUrl": "$BASE/auth",
			"tokenUrl": "$BASE/token",
			"userInfoUrl": "$BASE/userinfo",
			"logoutUrl": "$BASE/logout",
			"scope": "openid",
			"groupsClaim": "roles",
			"style": "keycloak",
			"displayNameClaim": "",
			"defaultGroup": ""
		}
	]
}
EOF
)"


docker-compose up -d || die "unable to bring up docker"

# wait for the nextcloud instance to be responsive
# TODO: how to find out if it is ready?
echo "Sleeping a minute while nextcloud installs"
sleep 60


docker-compose exec -u www-data -T nextcloud bash -x <<EOF || die "unable to configure sociallogin"
./occ app:install calendar
./occ app:install sociallogin
./occ config:app:set sociallogin prevent_create_email_exists --value=1 || exit 1
./occ config:app:set sociallogin update_profile_on_login --value=1 || exit 1
./occ config:app:set sociallogin custom_providers --value='$PROVIDER' || exit 1
EOF

../keycloak/client-delete 'nextcloud' || echo "client did not exist?"

../keycloak/client-create << EOF || die "unable to create client id"
{
	"clientId": "nextcloud",
	"rootUrl": "https://$NEXTCLOUD_HOSTNAME/",
	"adminUrl": "https://$NEXTCLOUD_HOSTNAME/",
	"redirectUris": [ "https://$NEXTCLOUD_HOSTNAME/*" ],
	"webOrigins": [ "https://$NEXTCLOUD_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$NEXTCLOUD_CLIENT_SECRET"
}
EOF
@ -0,0 +1,52 @@
#!/bin/bash -x

SERVER="apache2-foreground"
CANARY="/var/www/html/.installed"
if [ -r "$CANARY" ]; then
	exec "/entrypoint.sh" "$SERVER"
fi

echo >&2 "**** installing nextcloud"
NEXTCLOUD_UPDATE=1 bash /entrypoint.sh date || exit 1

echo >&2 "***** Setting up nextcloud for ${DOMAIN_NAME}"
occ() { su -p www-data -s /bin/sh -c "php /var/www/html/occ $*" ; }
#occ maintenance:install || exit 1

PROVIDER="$(cat <<EOF
{
	"custom_oidc": [
		{
			"name": "keycloak",
			"title": "Keycloak",
			"clientId": "nextcloud",
			"clientSecret": "$NEXTCLOUD_CLIENT_SECRET",
			"authorizeUrl": "$AUTH_URL",
			"tokenUrl": "$TOKEN_URL",
			"userInfoUrl": "$USERINFO_URL",
			"logoutUrl": "$LOGOUT_URL",
			"scope": "openid",
			"groupsClaim": "roles",
			"style": "keycloak",
			"displayNameClaim": "",
			"defaultGroup": ""
		}
	]
}
EOF
)"

for app in calendar sociallogin; do
	if [ ! -r "$CANARY.$app" ]; then
		echo >&2 "installing app $app"
		occ app:install $app || exit 1
		touch "$CANARY.$app"
	fi
done

occ config:app:set sociallogin prevent_create_email_exists --value=1 || exit 1
occ config:app:set sociallogin update_profile_on_login --value=1 || exit 1
occ config:app:set sociallogin custom_providers --value=\'$PROVIDER\' || exit 1

touch "$CANARY"
exec "/entrypoint.sh" "$SERVER"
@ -0,0 +1,31 @@
version: '3.9'
services:
  nginx:
    # image: nginx:1.21-alpine
    build:
      context: nginx
      dockerfile: Dockerfile
    restart: always
    #entrypoint: /bin/sh
    container_name: nginx
    ports:
      - "80:80"
      - "443:443"
      - "8448:8448"
    volumes:
      - ./nginx/etc/includes:/etc/nginx/includes:ro
      - ./nginx/etc/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/default.conf:/etc/nginx/templates/default.conf.template:ro
      - ./html:/var/www/html:ro
      - ./data/nginx/certbot/www:/var/www/certbot:ro
      - ./data/nginx/certbot/conf:/etc/letsencrypt:rw
      - /home:/home:ro
    env_file:
      - env.production

  certbot:
    image: certbot/certbot
    container_name: certbot
    volumes:
      - ./data/nginx/certbot/conf:/etc/letsencrypt
      - ./data/nginx/certbot/www:/var/www/certbot
@ -0,0 +1,30 @@
FROM alpine
RUN apk update
RUN echo "building" \
	&& apk add \
		nginx \
		collectd \
		collectd-nginx \
		nginx-mod-http-vts \
		gettext \
		curl \
		openssl \
	&& mkdir -p \
		/etc/nginx/modules-enabled \
		/etc/nginx/conf.d \
		/docker-entrypoint.d \
	&& ln -sf /etc/nginx/modules/10_http_vts.conf /etc/nginx/modules-enabled \
	# forward request and error logs to docker log collector
	&& ln -sf /dev/stdout /var/log/nginx/access.log \
	&& ln -sf /dev/stderr /var/log/nginx/error.log \
	&& echo "Done"

COPY ["docker-entrypoint.d/*", "/docker-entrypoint.d/" ]
COPY ["docker-entrypoint.sh", "/" ]

ENTRYPOINT ["/docker-entrypoint.sh"]
EXPOSE 80
STOPSIGNAL SIGQUIT
CMD ["nginx", "-g", "daemon off;"]
@ -1,34 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"

source ../env.production
source ./env.production

domain_args="-d $DOMAIN_NAME,$KEYCLOAK_HOSTNAME,$HEDGEDOC_HOSTNAME,$MASTODON_HOSTNAME,$NEXTCLOUD_HOSTNAME,$GRAFANA_HOSTNAME,$MATRIX_HOSTNAME,$GITEA_HOSTNAME,$MOBILIZON_HOSTNAME,$PIXELFED_HOSTNAME"
rsa_key_size=2048

set -x

# move the temp live directory away if
# this is the first time we've run anything here
if [ ! -d "../data/certbot/conf/accounts" ]; then
	echo "deleting temp keys"
	rm -rf ../data/certbot/conf/live
fi

docker-compose run --rm certbot \
	certonly \
	--webroot \
	--webroot-path /var/www/certbot \
	--email "admin@$DOMAIN_NAME" \
	--rsa-key-size "$rsa_key_size" \
	--agree-tos \
	--no-eff-email \
	--force-renewal \
	$domain_args \
	|| die "unable to renew!"

docker-compose exec nginx nginx -s reload
@ -1,28 +0,0 @@
version: '3'
services:
  nginx:
    image: nginx:1.21-alpine
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/templates:/etc/nginx/templates:ro
      - ./nginx/includes:/etc/nginx/includes:ro
      - ../html:/var/www/html:ro
      - ../data/certbot/www:/var/www/certbot:ro
      - ../data/certbot/conf:/etc/letsencrypt:ro
      - ../data/nginx/cache:/data/nginx/cache:rw
      - /home:/home:ro
    env_file:
      - ../env.production
      - env.production
    extra_hosts:
      - "host.docker.internal:host-gateway"

  certbot:
    image: certbot/certbot
    volumes:
      - ../data/certbot/conf:/etc/letsencrypt
      - ../data/certbot/www:/var/www/certbot
@ -0,0 +1,12 @@
#!/bin/sh -x
touch /started

#cat >> /etc/collectd/collectd.conf <<EOF
cat /etc/collectd/collectd.conf - > /tmp/conf <<EOF
LoadPlugin nginx
<Plugin "nginx">
	URL "http://localhost:80/nginx_status"
</Plugin>
EOF

#collectd
@ -0,0 +1,31 @@
#!/bin/sh

mkdir -p /data/nginx/cache

if [ -z "$DOMAIN_NAME" ]; then
	DOMAIN_NAME="example.com"
fi

certdir="/etc/letsencrypt/live/${DOMAIN_NAME}"

if [ -r "$certdir/fullchain.pem" ]; then
	exit 0
fi

mkdir -p "$certdir"

echo >&2 "$certdir: Creating temporary keys"
openssl req \
	-x509 \
	-newkey rsa:2048 \
	-keyout "$certdir/privkey.pem" \
	-out "$certdir/fullchain.pem" \
	-sha256 \
	-nodes \
	-days 365 \
	-subj "/CN=$DOMAIN_NAME" \
	|| exit 1

echo >&2 "$certdir: Generated temporary keys -- certbot needs to request real ones"
exit 0
@ -0,0 +1,39 @@
#!/bin/sh

set -e

ME=$(basename $0)

entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

auto_envsubst() {
  local template_dir="${NGINX_ENVSUBST_TEMPLATE_DIR:-/etc/nginx/templates}"
  local suffix="${NGINX_ENVSUBST_TEMPLATE_SUFFIX:-.template}"
  local output_dir="${NGINX_ENVSUBST_OUTPUT_DIR:-/etc/nginx/conf.d}"
  local filter="${NGINX_ENVSUBST_FILTER:-}"

  local template defined_envs relative_path output_path subdir
  defined_envs=$(printf '${%s} ' $(awk "END { for (name in ENVIRON) { print ( name ~ /${filter}/ ) ? name : \"\" } }" < /dev/null ))
  [ -d "$template_dir" ] || return 0
  if [ ! -w "$output_dir" ]; then
    entrypoint_log "$ME: ERROR: $template_dir exists, but $output_dir is not writable"
    return 0
  fi
  find "$template_dir" -follow -type f -name "*$suffix" -print | while read -r template; do
    relative_path="${template#$template_dir/}"
    output_path="$output_dir/${relative_path%$suffix}"
    subdir=$(dirname "$relative_path")
    # create a subdirectory where the template file exists
    mkdir -p "$output_dir/$subdir"
    entrypoint_log "$ME: Running envsubst on $template to $output_path"
    envsubst "$defined_envs" < "$template" > "$output_path"
  done
}

auto_envsubst

exit 0
@ -0,0 +1,47 @@
#!/bin/sh
# vim:sw=4:ts=4:et

set -e

entrypoint_log() {
    if [ -z "${NGINX_ENTRYPOINT_QUIET_LOGS:-}" ]; then
        echo "$@"
    fi
}

if [ "$1" = "nginx" -o "$1" = "nginx-debug" ]; then
    if /usr/bin/find "/docker-entrypoint.d/" -mindepth 1 -maxdepth 1 -type f -print -quit 2>/dev/null | read v; then
        entrypoint_log "$0: /docker-entrypoint.d/ is not empty, will attempt to perform configuration"

        entrypoint_log "$0: Looking for shell scripts in /docker-entrypoint.d/"
        find "/docker-entrypoint.d/" -follow -type f -print | sort -V | while read -r f; do
            case "$f" in
                *.envsh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Sourcing $f";
                        . "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *.sh)
                    if [ -x "$f" ]; then
                        entrypoint_log "$0: Launching $f";
                        "$f"
                    else
                        # warn on shell scripts without exec bit
                        entrypoint_log "$0: Ignoring $f, not executable";
                    fi
                    ;;
                *) entrypoint_log "$0: Ignoring $f";;
            esac
        done

        entrypoint_log "$0: Configuration complete; ready for start up"
    else
        entrypoint_log "$0: No files found in /docker-entrypoint.d/, skipping configuration"
    fi
fi

exec "$@"
@ -1,30 +0,0 @@
server {
	server_name ${PIXELFED_HOSTNAME};
	client_max_body_size 128m;

	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;

	gzip on;
	gzip_disable "msie6";

	proxy_read_timeout 1800s;

	location / {
		proxy_pass http://host.docker.internal:8090;
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
	}

	listen 443 ssl;
	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	include /etc/nginx/includes/options-ssl-nginx.conf;
	include /etc/nginx/includes/challenge.conf;
	ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}
@ -1,41 +0,0 @@
map $http_upgrade $connection_upgrade {
	default upgrade;
	'' close;
}

server {
	server_name social.${DOMAIN_NAME};
	client_max_body_size 128m;

	location / {
		proxy_pass http://host.docker.internal:6001;
		proxy_set_header Host $host;
		proxy_set_header X-Forwarded-For $remote_addr;
		proxy_set_header X-Forwarded-Proto https;
	}

	location /api/v1/streaming {
		proxy_pass http://host.docker.internal:4000;
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
		proxy_set_header Upgrade $http_upgrade;
		proxy_set_header Connection $connection_upgrade;

		proxy_buffering off;
		proxy_redirect off;
		proxy_http_version 1.1;
		tcp_nodelay on;
	}


	listen 443 ssl;
	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	include /etc/nginx/includes/options-ssl-nginx.conf;
	include /etc/nginx/includes/challenge.conf;
	ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}
@ -1,39 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $0)"
cd "$DIRNAME"

source ../env.production || die "no top level env"
source env.production || die "no local env"

if [ -z "${DOMAIN_NAME}" ]; then
	die "DOMAIN_NAME not set"
fi

certdir="../data/certbot/conf/live/${DOMAIN_NAME}"

if [ -r "$certdir/privkey.pem" ]; then
	docker-compose up -d || die "nginx: unable to start"
	exit 0
fi

mkdir -p "$certdir" || die "$certdir: unable to make"

openssl req \
	-x509 \
	-newkey rsa:2048 \
	-keyout "$certdir/privkey.pem" \
	-out "$certdir/fullchain.pem" \
	-sha256 \
	-nodes \
	-days 365 \
	-subj "/CN=${DOMAIN_NAME}'" \
	|| die "$certdir/privkey.pem: unable to create temp key"

docker-compose up -d || die "unable to bring up nginx"

echo "SLEEPING..."
sleep 10

./certbot-renew || die "unable to create certs"
@ -0,0 +1,23 @@
version: '3'
services:
  prometheus:
    image: prom/prometheus
    restart: always
    container_name: prometheus
    user: root
    volumes:
      - ./data/prometheus/storage:/prometheus:rw
      - ./prometheus/prometheus.yaml:/etc/prometheus/prometheus.yml:ro
      - ./prometheus/entrypoint.sh:/entrypoint.sh:ro
    entrypoint: ["/entrypoint.sh"]

  cadvisor:
    image: gcr.io/cadvisor/cadvisor:latest
    restart: always
    container_name: cadvisor
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
@ -0,0 +1,11 @@
#!/bin/sh -x

chmod 777 /prometheus
exec su -s /bin/sh nobody <<EOF
exec /bin/prometheus \
	--config.file=/etc/prometheus/prometheus.yml \
	--web.console.libraries=/etc/prometheus/console_libraries \
	--web.console.templates=/etc/prometheus/consoles
EOF

# --storage.local.path=/prometheus \
@ -0,0 +1,26 @@
global:
  scrape_interval: 15s
  external_labels:
    monitor: 'codelab-monitor'

scrape_configs:
  # nginx vts data
  - job_name: 'nginx'
    scrape_interval: 5s
    metrics_path: "/status/format/prometheus"
    static_configs:
      - targets: ['nginx:80']
  - job_name: 'metrics'
    scrape_interval: 5s
    static_configs:
      # grafana data from /metrics
      - targets: ['dashboard:3000']
      # host running the docker-compose
      - targets: ['172.17.0.1:9100']
      # cadvisor system
      - targets: ['cadvisor:8080']
  - job_name: "synapse"
    scrape_interval: 15s
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["matrix-synapse:9000"]
@ -1,39 +0,0 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }

which jq > /dev/null || die "jq not installed?"
which docker-compose > /dev/null || die "docker-compose not installed?"

source ./env.production || die "no production env?"

if [ -z "$DOMAIN_NAME" ]; then
	die "\$DOMAIN_NAME not set; things will break"
fi

SERVICES=nginx # there is no host
SERVICES+=\ keycloak
SERVICES+=\ hedgedoc
SERVICES+=\ nextcloud
SERVICES+=\ mastodon
SERVICES+=\ grafana
SERVICES+=\ matrix
SERVICES+=\ gitea
SERVICES+=\ mobilizon

HOSTS+=\ $KEYCLOAK_HOST
HOSTS+=\ $HEDGEDOC_HOST
HOSTS+=\ $NEXTCLOUD_HOST
HOSTS+=\ $MASTODON_HOST
HOSTS+=\ $GRAFANA_HOST
HOSTS+=\ $MATRIX_HOST
HOSTS+=\ $GITEA_HOST
HOSTS+=\ $MOBILIZON_HOST

for host in $HOSTS ; do
	host $host > /dev/null || die "$host: DNS entry not present?"
done

for service in $SERVICES ; do
	echo "$service: starting"
	./$service/setup || die "$server: failed to start"
done
@ -1,7 +0,0 @@
#!/bin/bash

for file in */docker-compose.yaml ; do
	dir="$(dirname "$file")"
	echo "$dir"
	( cd "$dir" ; docker-compose down )
done
@@ -0,0 +1,29 @@
# Wireguard proxy setup

This is for a server that sits behind a firewall or NAT gateway and does not
have a static IP address. A cheap $6/month DigitalOcean droplet can be set up
to forward *all* internet traffic to the server, so the server stays reachable
at the droplet's public address even when its own IP changes.

* On both the proxy and the server, install wireguard and generate a key pair:

```
sudo apt install wireguard-tools net-tools
wg genkey \
| sudo tee /etc/wireguard/wg0.key \
| wg pubkey \
| sudo tee /etc/wireguard/wg0.pub
sudo chmod -R go-rwx /etc/wireguard
```

* Copy `wireguard/wg0-proxy.conf` to `/etc/wireguard/wg0.conf` on the proxy.
* On the **proxy**, edit `/etc/wireguard/wg0.conf`:
  * Change `${SERVER_PUBKEY}` to the public key that was output on the server

* Copy `wireguard/wg0-server.conf` to `/etc/wireguard/wg0.conf` on the server.
* On the **server**, edit `/etc/wireguard/wg0.conf`:
  * Change `${PROXY_IP}` to the public IP address of the proxy (two places)
  * Change `${PROXY_PUBKEY}` to the public key output on the proxy (two places)
  * Change `${SERVER_GW}` to the gateway address used to reach the internet from the server

* On both machines run `sudo wg-quick up /etc/wireguard/wg0.conf`
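After the final `wg-quick up`, the tunnel can be sanity-checked from both ends. A sketch: `ifconfig.me` is just one example of an external IP echo service, and this assumes `curl` is installed on the server.

```
# on either machine: the peer should show a recent handshake and rising transfer counters
sudo wg show wg0

# on the server: the address seen by the internet should now be the proxy's public IP
curl -4 -s https://ifconfig.me ; echo
```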
@@ -0,0 +1,33 @@
[Interface]
Address = 192.168.4.1/24
ListenPort = 51820

PostUp = wg set %i private-key /etc/wireguard/%i.key

# Enable IP masquerading for the remote host
PostUp = echo 1 > /proc/sys/net/ipv4/ip_forward
PostUp = iptables -A FORWARD -i %i -j ACCEPT
PostUp = iptables -A FORWARD -o %i -j ACCEPT
PostUp = iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE

# accept the wireguard connection itself (exempt it from the DNAT below)
PostUp = iptables -t nat -A PREROUTING -i eth0 -p udp --dport 51820 -j ACCEPT

# keep ssh to the proxy itself reachable: redirect inbound port 23 to the local sshd on 22
PostUp = iptables -t nat -A PREROUTING -i eth0 -p tcp --dport 23 -j REDIRECT --to-port 22

# redirect *all* remaining traffic through the wg tunnel to the server
PostUp = iptables -t nat -A PREROUTING -i eth0 -p all -j DNAT --to-destination 192.168.4.2

# Tear down the rules added above
PostDown = iptables -D FORWARD -i %i -j ACCEPT
PostDown = iptables -D FORWARD -o %i -j ACCEPT
PostDown = iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE

PostDown = iptables -t nat -D PREROUTING -i eth0 -p udp --dport 51820 -j ACCEPT
PostDown = iptables -t nat -D PREROUTING -i eth0 -p tcp --dport 23 -j REDIRECT --to-port 22
PostDown = iptables -t nat -D PREROUTING -i eth0 -p all -j DNAT --to-destination 192.168.4.2

[Peer]
PublicKey = ${SERVER_PUBKEY}
AllowedIPs = 192.168.4.2/32
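To verify that the `PostUp` rules above actually landed once the interface is up, the proxy's nat table can be listed directly (nothing project-specific assumed here):

```
sudo iptables -t nat -L PREROUTING -n -v --line-numbers
sudo iptables -t nat -L POSTROUTING -n -v
```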
@@ -0,0 +1,28 @@
# wg0-server.conf
#
# This is the configuration for the server hidden behind the wireguard proxy.
# It routes all internet traffic via the proxy, with the exception of traffic
# to the proxy itself. It is still accessible on the local network.
#
# When moving this to a new machine:
# * Update the PostUp route so that the proxy address has an explicit route via the local gateway
# * Update the PostDown to delete the explicit route and restore the default gw
# * Update the Peer PublicKey and Endpoint with the proxy key and address
#
[Interface]
PostUp = wg set %i private-key /etc/wireguard/%i.key
Address = 192.168.4.2/24

# Add an explicit route to the proxy via the local gateway, then replace the default route with the wireguard tunnel
PostUp = route add ${PROXY_IP} gw ${SERVER_GW} || echo "wrong route"
PostUp = route del default || echo "no default"
PostUp = route add default gw 192.168.4.1

PostDown = route del ${PROXY_IP}
PostDown = route add default gw ${SERVER_GW}

[Peer]
PublicKey = ${PROXY_PUBKEY}
Endpoint = ${PROXY_IP}:51820
AllowedIPs = 0.0.0.0/0
PersistentKeepalive = 25
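Once both ends are verified by hand, the tunnel can be brought up at boot. A sketch, assuming wireguard-tools installed its stock `wg-quick@.service` systemd template and the config lives at `/etc/wireguard/wg0.conf`:

```
sudo systemctl enable --now wg-quick@wg0
```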