Compare commits

...

No commits in common. 'main' and 'bookstack' have entirely different histories.

9
.gitignore vendored

@ -1,3 +1,8 @@
/secrets/
/data/
.*.swp
data
*.secrets
env.smtp
backups
secrets
config.exs
env.production

@ -0,0 +1,19 @@
# hackerspace.zone
Infrastructure for the self-hosted, single-sign-on, community-run services.
* Set the domain name in `env.production`
* Create the DNS entries in the domain for `login`, `cloud`, `matrix`, `dashboard`, `docs` and maybe more.
* Install dependencies:
```
apt install jq docker compose
```
* Setup each of the services. `keycloak` and `nginx` are required to start the others:
```
./keycloak/setup
./nginx/setup
./start-all
```

@ -0,0 +1,17 @@
# Static-page hosting: caddy serves read-only content from ../data/pages/srv
# using the adjacent pages.Caddyfile.
services:
  pages:
    image: caddy
    volumes:
      - ../data/pages/srv:/srv:ro
      - ./pages.Caddyfile:/etc/caddy/Caddyfile:ro
    environment:
      # NOTE(review): these POSTGRES_* values look copy-pasted from the
      # hedgedoc service; caddy does not obviously consume them — confirm
      # and remove if unused.
      - POSTGRES_USER=hedgedoc
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=hedgedoc
    restart: always
    env_file:
      # NOTE(review): ../data/hedgedoc/env.secrets is also hedgedoc's secrets
      # file — verify the pages service really needs it.
      - ../env.production
      - env.production
      - ../data/hedgedoc/env.secrets
    labels:
      - "diun.enable=true"

@ -0,0 +1,2 @@
CMD_OAUTH2_CLIENT_SECRET=abcdef1234
CMD_SESSION_SECRET=abcdef1234

@ -0,0 +1,69 @@
#!/bin/bash
# Set up and start the hedgedoc service.
#
# First run: generates OAuth2/session secrets, writes them to
# ../data/hedgedoc/env.secrets, registers the "hedgedoc" client in keycloak,
# then brings up the compose stack. Later runs: just (re)start the stack.
die() { echo >&2 "$@" ; exit 1 ; }

# run relative to this script so ../env.production and ../data resolve
# (quote "$0" so paths containing spaces survive word splitting)
DIRNAME="$(dirname "$0")"
cd "$DIRNAME"

source ../env.production || die "no top level env?"
source env.production || die "no local env?"

DATA="../data/hedgedoc"
SECRETS="$DATA/env.secrets"

# already configured? just start it
if [ -r "$SECRETS" ]; then
	docker compose up -d || die "hedgedoc: unable to start"
	exit 0
fi

docker compose down 2>/dev/null

# regenerate the client secrets
CLIENT_SECRET="$(openssl rand -hex 20)"
SESSION_SECRET="$(openssl rand -hex 20)"

mkdir -p "$DATA/uploads"
# FIX: was `chmod 666`, which strips the execute bit from a *directory*,
# making it untraversable by the container user. Use 777 like the sibling
# mobilizon setup script.
chmod 777 "$DATA/uploads"

cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
CMD_OAUTH2_CLIENT_SECRET=$CLIENT_SECRET
CMD_SESSION_SECRET=$SESSION_SECRET
CMD_DOMAIN=${HEDGEDOC_HOSTNAME}
CMD_OAUTH2_AUTHORIZATION_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/auth
CMD_OAUTH2_TOKEN_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/token
CMD_OAUTH2_USER_PROFILE_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/userinfo
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
CMD_OAUTH2_CLIENT_ID=hedgedoc
CMD_OAUTH2_PROVIDERNAME=Keycloak
EOF

# replace any stale keycloak client registration with a fresh one
../keycloak/client-delete hedgedoc
../keycloak/client-create <<EOF || die "unable to create hedgedoc client"
{
"clientId": "hedgedoc",
"rootUrl": "https://$HEDGEDOC_HOSTNAME",
"adminUrl": "https://$HEDGEDOC_HOSTNAME",
"redirectUris": [ "https://$HEDGEDOC_HOSTNAME/*" ],
"webOrigins": [ "https://$HEDGEDOC_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET",
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"id",
"email"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"microprofile-jwt"
]
}
EOF

docker compose up -d || die "hedgedoc: unable to start container"

@ -0,0 +1,113 @@
# Mobilizon instance configuration
import Config

listen_ip = System.get_env("MOBILIZON_INSTANCE_LISTEN_IP", "0.0.0.0")

# validate/parse the listen address into an :inet tuple; fail fast on garbage
listen_ip =
  case listen_ip |> to_charlist() |> :inet.parse_address() do
    {:ok, listen_ip} -> listen_ip
    _ -> raise "MOBILIZON_INSTANCE_LISTEN_IP does not match the expected IP format."
  end

config :mobilizon, Mobilizon.Web.Endpoint,
  server: true,
  url: [host: System.get_env("MOBILIZON_INSTANCE_HOST", "mobilizon.lan")],
  http: [
    port: String.to_integer(System.get_env("MOBILIZON_INSTANCE_PORT", "4000")),
    ip: listen_ip
  ],
  secret_key_base: System.get_env("MOBILIZON_INSTANCE_SECRET_KEY_BASE", "changethis")

config :mobilizon, Mobilizon.Web.Auth.Guardian,
  secret_key: System.get_env("MOBILIZON_INSTANCE_SECRET_KEY", "changethis")

config :mobilizon, :instance,
  name: System.get_env("MOBILIZON_INSTANCE_NAME", "Mobilizon"),
  description: "Change this to a proper description of your instance",
  hostname: System.get_env("MOBILIZON_INSTANCE_HOST", "mobilizon.lan"),
  registrations_open: System.get_env("MOBILIZON_INSTANCE_REGISTRATIONS_OPEN", "false") == "true",
  demo: false,
  allow_relay: true,
  federating: true,
  email_from: System.get_env("MOBILIZON_INSTANCE_EMAIL", "woodbine@example.com"),
  email_reply_to: System.get_env("MOBILIZON_REPLY_EMAIL", "woodbine@example.com")

config :mobilizon, Mobilizon.Storage.Repo,
  adapter: Ecto.Adapters.Postgres,
  username: System.get_env("MOBILIZON_DATABASE_USERNAME", "username"),
  password: System.get_env("MOBILIZON_DATABASE_PASSWORD", "password"),
  database: System.get_env("MOBILIZON_DATABASE_DBNAME", "mobilizon"),
  hostname: System.get_env("MOBILIZON_DATABASE_HOST", "postgres"),
  port: 5432,
  pool_size: 10

config :mobilizon, Mobilizon.Web.Email.Mailer,
  adapter: Swoosh.Adapters.SMTP,
  relay: System.get_env("MOBILIZON_SMTP_SERVER", "mail.example.com"),
  # FIX: parse the port into an integer, matching how the HTTP port above is
  # handled; env vars are always strings.
  port: String.to_integer(System.get_env("MOBILIZON_SMTP_PORT", "587")),
  username: System.get_env("MOBILIZON_SMTP_USERNAME", "username"),
  password: System.get_env("MOBILIZON_SMTP_PASSWORD", "password"),
  tls: :if_available,
  # NOTE(review): :tlsv1 and :"tlsv1.1" are deprecated protocol versions —
  # consider restricting to 1.2+ once the relay supports it.
  allowed_tls_versions: [:tlsv1, :"tlsv1.1", :"tlsv1.2"],
  # FIX: System.get_env returns a *string*, and in Elixir every value except
  # nil/false is truthy — so the previous `ssl: System.get_env(...)` enabled
  # SSL even when the variable was "false". Compare against "true" to get a
  # real boolean (same pattern as registrations_open above).
  ssl: System.get_env("MOBILIZON_SMTP_SSL", "false") == "true",
  retries: 2,
  no_mx_lookups: false,
  auth: :always

config :geolix,
  databases: [
    %{
      id: :city,
      adapter: Geolix.Adapter.MMDB2,
      source: "/var/lib/mobilizon/geo_db/GeoLite2-City.mmdb"
    }
  ]

config :mobilizon, Mobilizon.Web.Upload.Uploader.Local,
  uploads: System.get_env("MOBILIZON_UPLOADS", "/var/lib/mobilizon/uploads")

config :mobilizon, :exports,
  path: System.get_env("MOBILIZON_UPLOADS_EXPORTS", "/var/lib/mobilizon/uploads/exports"),
  formats: [
    Mobilizon.Service.Export.Participants.CSV,
    Mobilizon.Service.Export.Participants.PDF,
    Mobilizon.Service.Export.Participants.ODS
  ]

config :tz_world,
  data_dir: System.get_env("MOBILIZON_TIMEZONES_DIR", "/var/lib/mobilizon/timezones")

#
# keycloak config for hackerspace.zone self hosted single-sign-on
#
keycloak_hostname = System.get_env("KEYCLOAK_HOSTNAME", "keycloak.example.com")
keycloak_realm = System.get_env("REALM", "example")
keycloak_secret = System.get_env("MOBILIZON_CLIENT_SECRET", "abcdef1234")
keycloak_url = "https://#{keycloak_hostname}/realms/#{keycloak_realm}"

config :ueberauth,
  Ueberauth,
  providers: [
    keycloak: {Ueberauth.Strategy.Keycloak, [default_scope: "openid"]}
  ]

config :mobilizon, :auth,
  oauth_consumer_strategies: [
    {:keycloak, "#{keycloak_hostname}"}
  ]

config :ueberauth, Ueberauth.Strategy.Keycloak.OAuth,
  client_id: "mobilizon",
  client_secret: keycloak_secret,
  site: keycloak_url,
  authorize_url: "#{keycloak_url}/protocol/openid-connect/auth",
  token_url: "#{keycloak_url}/protocol/openid-connect/token",
  userinfo_url: "#{keycloak_url}/protocol/openid-connect/userinfo",
  token_method: :post

# NOTE(review): placeholder VAPID keys — web push will not work until real
# keys are generated and supplied.
config :web_push_encryption, :vapid_details,
  subject: "mailto:mail@example.net",
  public_key: "public_key",
  private_key: "private_key"

@ -0,0 +1,28 @@
# Mobilizon events server plus its PostGIS database.
version: "3"  # NOTE: the top-level version key is ignored by Compose V2

services:
  mobilizon:
    image: framasoft/mobilizon:4.1.0
    restart: always
    env_file:
      - ../env.production
      - ./env.production
      - ../data/mobilizon/env.secrets
    volumes:
      - ../data/mobilizon/uploads:/var/lib/mobilizon/uploads
      - ./config.exs:/etc/mobilizon/config.exs:ro
      # - ${PWD}/GeoLite2-City.mmdb:/var/lib/mobilizon/geo_db/GeoLite2-City.mmdb
    ports:
      - "7000:7000"
    labels:
      - "diun.enable=true"

  db:
    image: postgis/postgis:13-3.1
    restart: always
    volumes:
      - ../data/mobilizon/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mobilizon
      - POSTGRES_PASSWORD=mobilizon
      - POSTGRES_DB=mobilizon

@ -0,0 +1,24 @@
# Database settings
POSTGRES_USER=mobilizon
POSTGRES_PASSWORD=changethis
POSTGRES_DB=mobilizon
MOBILIZON_DATABASE_USERNAME=mobilizon
MOBILIZON_DATABASE_PASSWORD=mobilizon
MOBILIZON_DATABASE_DBNAME=mobilizon
MOBILIZON_DATABASE_HOST=db
# Instance configuration
MOBILIZON_INSTANCE_REGISTRATIONS_OPEN=false
MOBILIZON_INSTANCE_PORT=7000
MOBILIZON_INSTANCE_EMAIL=noreply@mobilizon.lan
MOBILIZON_REPLY_EMAIL=contact@mobilizon.lan
# Email settings
MOBILIZON_SMTP_SERVER=localhost
MOBILIZON_SMTP_PORT=25
MOBILIZON_SMTP_HOSTNAME=localhost
MOBILIZON_SMTP_USERNAME=noreply@mobilizon.lan
MOBILIZON_SMTP_PASSWORD=password
MOBILIZON_SMTP_SSL=false

@ -0,0 +1,62 @@
#!/bin/bash
die() { echo >&2 "mobilizon: $@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
source ../env.smtp 2>/dev/null
DATA="../data/mobilizon"
SECRETS="$DATA/env.secrets"
if [ -r "$SECRETS" ]; then
docker compose up -d || die "unable to start"
exit 0
fi
docker compose down 2>/dev/null
CLIENT_SECRET="$(openssl rand -hex 20)"
mkdir -p "$DATA/uploads"
chmod 777 "$DATA/uploads"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
MOBILIZON_INSTANCE_NAME=${DOMAIN_NAME}
MOBILIZON_INSTANCE_HOST=${MOBILIZON_HOSTNAME}
MOBILIZON_INSTANCE_SECRET_KEY_BASE=$(openssl rand -hex 20)
MOBILIZON_INSTANCE_SECRET_KEY=$(openssl rand -hex 20)
MOBILIZON_CLIENT_SECRET=${CLIENT_SECRET}
EOF
if [ -n "$SMTP_SERVER" ]; then
cat <<EOF >> "$SECRETS"
MOBILIZON_INSTANCE_EMAIL=events@${DOMAIN_NAME}
MOBILIZON_REPLY_EMAIL=noreply@${DOMAIN_NAME}
MOBILIZON_SMTP_SERVER=${SMTP_SERVER}
MOBILIZON_SMTP_PORT=${SMTP_PORT}
MOBILIZON_SMTP_USERNAME=${SMTP_USER}
MOBILIZON_SMTP_PASSWORD=${SMTP_PASSWORD}
EOF
fi
../keycloak/client-delete mobilizon
../keycloak/client-create <<EOF || die "unable to create client"
{
"clientId": "mobilizon",
"rootUrl": "https://$MOBILIZON_HOSTNAME",
"adminUrl": "https://$MOBILIZON_HOSTNAME",
"redirectUris": [ "https://$MOBILIZON_HOSTNAME/*" ],
"webOrigins": [ "https://$MOBILIZON_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET"
}
EOF
docker compose up -d || die "unable to start container"

@ -0,0 +1,18 @@
secrets:
DIUN_NOTIF_MATRIX_PASSWORD:
file: ../secrets/diun/DIUN_NOTIF_MATRIX_PASSWORD
services:
diun:
image: crazymax/diun:4
restart: always
command: serve
secrets: [ DIUN_NOTIF_MATRIX_PASSWORD ]
env_file:
- ../env.production
- ./env.production
volumes:
- ../data/diun/data:/data
- /var/run/docker.sock:/var/run/docker.sock
labels:
- "diun.enable=true"

@ -0,0 +1,13 @@
# diun (docker image update notifier) environment configuration
TZ=America/New_York
DIUN_WATCH_WORKERS=20
# check every 6 hours, with up to 30s of jitter between registry calls
DIUN_WATCH_SCHEDULE="0 */6 * * *"
DIUN_WATCH_JITTER=30s
DIUN_PROVIDERS_DOCKER=true
DIUN_WATCH_FIRSTCHECKNOTIF=true
# matrix notification target; the account password is mounted from the
# compose secret at /run/secrets (see docker-compose.yml)
DIUN_NOTIF_MATRIX_USER="@diun:woodbine.nyc"
DIUN_NOTIF_MATRIX_PASSWORDFILE=/run/secrets/DIUN_NOTIF_MATRIX_PASSWORD
DIUN_NOTIF_MATRIX_HOMESERVERURL=https://chat.woodbine.nyc
DIUN_NOTIF_MATRIX_ROOMID="!BDdgmKzNkJfHGohOIN:woodbine.nyc"
# SECURITY(review): this webhook URL embeds a live credential — anyone with
# the URL can post to the Discord channel. It should be moved into a secrets
# file (like the matrix password above) and the committed webhook rotated.
DIUN_NOTIF_DISCORD_WEBHOOKURL="https://discord.com/api/webhooks/1223645760005079203/JdqDR5IwaOe08Cdfp2gO97XNSfgdovQYNTFIZlstSCdmgyaisGYKjHKaUceD6oR2QoKO"

@ -0,0 +1,10 @@
#!/bin/bash
# (Re)start the diun container: cd to this directory, source the shared and
# local environment files, then bounce the compose stack. diun keeps no
# generated secrets, so no first-run bootstrap is needed.
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
# stop any running instance first; errors are ignored if nothing is up
docker compose down 2>/dev/null
docker compose up -d || die "diun: unable to bring up container"

@ -0,0 +1,11 @@
# filestash web file manager; state is persisted under ../data/filestash.
services:
  filestash:
    container_name: filestash
    image: machines/filestash
    restart: always
    environment:
      # NOTE(review): hostname is hard-coded here rather than derived from
      # env.production like the other services — confirm this is intended
      - APPLICATION_URL=files.woodbine.nyc
    ports:
      - "8334:8334"
    volumes:
      - ../data/filestash/state:/app/data/state/

@ -0,0 +1,24 @@
services:
grafana:
image: grafana/grafana-oss:8.5.1
user: "0:0"
environment:
GF_AUTH_GENERIC_OAUTH_ENABLED: 'True'
GF_AUTH_GENERIC_OAUTH_ALLOW_SIGN_UP: 'True' # otherwise no login is possible
#GF_AUTH_GENERIC_OAUTH_TEAM_IDS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_ORGANIZATIONS: ''
#GF_AUTH_GENERIC_OAUTH_ALLOWED_DOMAINS: '<domains>'
GF_AUTH_GENERIC_OAUTH_NAME: Keycloak
GF_AUTH_GENERIC_OAUTH_CLIENT_ID: grafana
GF_AUTH_GENERIC_OAUTH_SCOPES: openid profile email
# GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET is in env.secrets
# auth URLs are in the env.secrets since they have hostname expansion
volumes:
- ../data/grafana:/var/lib/grafana
restart: always
ports:
- 8000:3000
env_file:
- ../env.production
- env.production
- ../data/grafana/env.secrets

@ -0,0 +1,50 @@
#!/bin/bash
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
BASE="https://$KEYCLOAK_HOSTNAME/realms/$REALM/protocol/openid-connect"
SECRETS="../data/grafana/env.secrets"
if [ -r "$SECRETS" ]; then
docker compose up -d || die "grafana: unable to start container"
exit 0
fi
docker compose down 2>/dev/null
GRAFANA_CLIENT_SECRET="$(openssl rand -hex 32)"
GRAFANA_ADMIN_PASSWORD="$(openssl rand -hex 4)"
echo "Generating secrets: admin password $GRAFANA_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# Do not check in!
GF_SECURITY_ADMIN_PASSWORD=$GRAFANA_ADMIN_PASSWORD
GF_SERVER_ROOT_URL=https://$GRAFANA_HOSTNAME/
GF_SERVER_DOMAIN=$GRAFANA_HOSTNAME
GF_AUTH_GENERIC_OAUTH_AUTH_URL=$BASE/auth
GF_AUTH_GENERIC_OAUTH_TOKEN_URL=$BASE/token
GF_AUTH_GENERIC_OAUTH_API_URL=$BASE/userinfo
GF_AUTH_GENERIC_OAUTH_CLIENT_SECRET=$GRAFANA_CLIENT_SECRET
EOF
../keycloak/client-delete 'grafana' 2>/dev/null
../keycloak/client-create << EOF || die "unable to create client id"
{
"clientId": "grafana",
"rootUrl": "https://$GRAFANA_HOSTNAME/",
"adminUrl": "https://$GRAFANA_HOSTNAME/",
"redirectUris": [ "https://$GRAFANA_HOSTNAME/*" ],
"webOrigins": [ "https://$GRAFANA_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$GRAFANA_CLIENT_SECRET"
}
EOF
docker compose up -d || die "grafana: unable to bring up container"

@ -0,0 +1,126 @@
'use strict'

const archiver = require('archiver')
const sanitizeFilename = require('sanitize-filename')
const async = require('async')
const Router = require('express').Router

const errors = require('../errors')
const config = require('../config')
const models = require('../models')
const logger = require('../logger')
const { generateAvatar } = require('../letter-avatars')

// Routes for the authenticated user's own account: profile info, account
// deletion, data export, and public avatar generation.
const UserRouter = module.exports = Router()

// get me info
UserRouter.get('/me', function (req, res) {
  if (req.isAuthenticated()) {
    models.User.findOne({
      where: {
        id: req.user.id
      }
    }).then(function (user) {
      if (!user) { return errors.errorNotFound(res) }
      const profile = models.User.getProfile(user)
      res.send({
        status: 'ok',
        id: req.user.id,
        name: profile.name,
        photo: profile.photo
      })
    }).catch(function (err) {
      logger.error('read me failed: ' + err)
      return errors.errorInternalError(res)
    })
  } else {
    // unauthenticated callers get a soft "forbidden" payload (HTTP 200),
    // matching the client's expectations for this endpoint
    res.send({
      status: 'forbidden'
    })
  }
})

// delete the currently authenticated user
// NOTE: uses GET (not DELETE); destruction is gated on the per-user
// deleteToken matching, which is what prevents CSRF-style drive-by deletes
UserRouter.get('/me/delete/:token?', function (req, res) {
  if (req.isAuthenticated()) {
    models.User.findOne({
      where: {
        id: req.user.id
      }
    }).then(function (user) {
      if (!user) {
        return errors.errorNotFound(res)
      }
      if (user.deleteToken === req.params.token) {
        user.destroy().then(function () {
          res.redirect(config.serverURL + '/')
        })
      } else {
        return errors.errorForbidden(res)
      }
    }).catch(function (err) {
      logger.error('delete user failed: ' + err)
      return errors.errorInternalError(res)
    })
  } else {
    return errors.errorForbidden(res)
  }
})

// export the data of the authenticated user as a zip of markdown files
UserRouter.get('/me/export', function (req, res) {
  if (req.isAuthenticated()) {
    const archive = archiver('zip', {
      zlib: { level: 3 } // Sets the compression level.
    })
    res.setHeader('Content-Type', 'application/zip')
    res.attachment('archive.zip')
    archive.pipe(res)
    archive.on('error', function (err) {
      logger.error('export user data failed: ' + err)
      return errors.errorInternalError(res)
    })
    models.User.findOne({
      where: {
        id: req.user.id
      }
    }).then(function (user) {
      // FIX: previously this called models.Note.findAll() with no filter,
      // which exported EVERY note on the instance to any authenticated
      // user. Scope the export to notes owned by the requesting user, as
      // upstream HedgeDoc does.
      models.Note.findAll({
        where: {
          ownerId: user.id
        }
      }).then(function (notes) {
        const filenames = {}
        async.each(notes, function (note, callback) {
          const basename = sanitizeFilename(note.title, { replacement: '_' })
          let filename
          let suffix = ''
          // disambiguate duplicate titles: first pass uses the bare name
          // (suffix '' is not a number, so no separator); suffix++ coerces
          // '' to 0 and counts up, yielding "name-1.md", "name-2.md", ...
          do {
            const separator = typeof suffix === 'number' ? '-' : ''
            filename = basename + separator + suffix + '.md'
            suffix++
          } while (filenames[filename])
          filenames[filename] = true
          logger.debug('Write: ' + filename)
          archive.append(Buffer.from(note.content), { name: filename, date: note.lastchangeAt })
          callback(null, null)
        }, function (err) {
          if (err) {
            return errors.errorInternalError(res)
          }
          archive.finalize()
        })
      })
    }).catch(function (err) {
      logger.error('export user data failed: ' + err)
      return errors.errorInternalError(res)
    })
  } else {
    return errors.errorForbidden(res)
  }
})

// public, cacheable generated avatar for any username
UserRouter.get('/user/:username/avatar.svg', function (req, res, next) {
  res.setHeader('Content-Type', 'image/svg+xml')
  res.setHeader('Cache-Control', 'public, max-age=86400')
  res.send(generateAvatar(req.params.username))
})

@ -0,0 +1 @@
echo "libib is just a redirect to the externally hosted library"

@ -0,0 +1,31 @@
services:
manyfold:
image: ghcr.io/manyfold3d/manyfold:latest
ports:
- 3214:3214
env_file:
- ./env.production
- ../env.production
- ../data/manyfold/env.secrets
volumes:
- ../data/manyfold/libraries:/libraries
environment:
REDIS_URL: redis://redis:6379/1
labels:
- "diun.enable=true"
depends_on:
- db
- redis
restart: always
db:
image: postgres:15
volumes:
- ../data/manyfold/database:/var/lib/postgresql/data
env_file:
- ../data/manyfold/env.secrets
restart: on-failure
redis:
image: redis:7
restart: on-failure

@ -0,0 +1,2 @@
REGISTRATION=true
MULTIUSER=true

@ -0,0 +1,9 @@
#!/bin/bash
# (Re)start the manyfold service. No generated secrets are needed here;
# the compose file reads env.production and ../data/manyfold/env.secrets.
die() { echo >&2 "$@" ; exit 1 ; }

# run relative to this script (quote "$0" so paths with spaces survive)
DIRNAME="$(dirname "$0")"
cd "$DIRNAME"

source ../env.production || die "no top level env?"

docker compose down 2>/dev/null
# FIX: the error message said "diun" — copy-paste leftover from diun/setup
docker compose up -d || die "manyfold: unable to bring up container"

@ -0,0 +1,55 @@
<?php
/*
|--------------------------------------------------------------------------
| Create The Application
|--------------------------------------------------------------------------
|
| The first thing we will do is create a new Laravel application instance
| which serves as the "glue" for all the components of Laravel, and is
| the IoC container for the system binding all of the various parts.
|
*/
$app = new Illuminate\Foundation\Application(
realpath(__DIR__.'/../')
);
/*
|--------------------------------------------------------------------------
| Bind Important Interfaces
|--------------------------------------------------------------------------
|
| Next, we need to bind some important interfaces into the container so
| we will be able to resolve them when needed. The kernels serve the
| incoming requests to this application from both the web and CLI.
|
*/
$app->singleton(
Illuminate\Contracts\Http\Kernel::class,
App\Http\Kernel::class
);
$app->singleton(
Illuminate\Contracts\Console\Kernel::class,
App\Console\Kernel::class
);
$app->singleton(
Illuminate\Contracts\Debug\ExceptionHandler::class,
App\Exceptions\Handler::class
);
/*
|--------------------------------------------------------------------------
| Return The Application
|--------------------------------------------------------------------------
|
| This script returns the application instance. The instance is given to
| the calling script so we can separate the building of the instances
| from the actual running of the application and sending responses.
|
*/
return $app;

@ -0,0 +1,72 @@
services:
## App and Worker
app:
image: osresearch/pixelfed
restart: unless-stopped
env_file:
- ../env.production
- env.production
- ../data/pixelfed/env.secrets
volumes:
- ../data/pixelfed/app-storage:/var/www/storage
- ../data/pixelfed/app-bootstrap:/var/www/bootstrap
- ../data/pixelfed/env.secrets:/var/www/.env
networks:
- external
- internal
ports:
- "8090:80"
depends_on:
- db
- redis
labels:
- "diun.enable=true"
worker:
image: osresearch/pixelfed
restart: unless-stopped
env_file:
- ../env.production
- env.production
- ../data/pixelfed/env.secrets
volumes:
- ../data/pixelfed/app-storage:/var/www/storage
- ../data/pixelfed/app-bootstrap:/var/www/bootstrap
- ../data/pixelfed/env.secrets:/var/www/.env
networks:
- external
- internal
command: gosu www-data php artisan horizon
depends_on:
- db
- redis
## DB and Cache
db:
image: mysql:8.0
restart: unless-stopped
networks:
- internal
command: --default-authentication-plugin=mysql_native_password
env_file:
- ../env.production
- env.production
volumes:
- "../data/pixelfed/db-data:/var/lib/mysql"
redis:
image: redis:5-alpine
restart: unless-stopped
env_file:
- ../env.production
- env.production
volumes:
- "../data/pixelfed/redis-data:/data"
networks:
- internal
networks:
internal:
internal: true
external:
driver: bridge

@ -0,0 +1,158 @@
## Crypto
# APP_KEY is set env.secrets
## General Settings
APP_ENV=production
APP_DEBUG=false
# domain name specifics are passed in env.secrets
# APP_NAME="Pixelfed Prod (Testing)"
# APP_URL="https://pixelfed.hackerspace.zone"
# APP_DOMAIN="pixelfed.hackerspace.zone"
# ADMIN_DOMAIN="pixelfed.hackerspace.zone"
# SESSION_DOMAIN="pixelfed.hackerspace.zone"
OPEN_REGISTRATION=true
ENFORCE_EMAIL_VERIFICATION=false
PF_MAX_USERS=1000
OAUTH_ENABLED=false
APP_TIMEZONE=UTC
APP_LOCALE=en
## Pixelfed Tweaks
LIMIT_ACCOUNT_SIZE=true
MAX_ACCOUNT_SIZE=1000000
MAX_PHOTO_SIZE=15000
MAX_AVATAR_SIZE=2000
MAX_CAPTION_LENGTH=500
MAX_BIO_LENGTH=125
MAX_NAME_LENGTH=30
MAX_ALBUM_LENGTH=4
IMAGE_QUALITY=80
PF_OPTIMIZE_IMAGES=true
PF_OPTIMIZE_VIDEOS=true
ADMIN_ENV_EDITOR=false
ACCOUNT_DELETION=true
ACCOUNT_DELETE_AFTER=false
MAX_LINKS_PER_POST=0
## Instance
# INSTANCE_DESCRIPTION is set in env.secrets
INSTANCE_PUBLIC_HASHTAGS=false
#INSTANCE_CONTACT_EMAIL=
INSTANCE_PUBLIC_LOCAL_TIMELINE=true
INSTANCE_DISCOVER_PUBLIC=true
#BANNED_USERNAMES=
STORIES_ENABLED=false
RESTRICTED_INSTANCE=false
## Mail config is in env.secrets
# MAIL_DRIVER=log
# MAIL_HOST=smtp.mailtrap.io
# MAIL_PORT=2525
# MAIL_FROM_ADDRESS="pixelfed@example.com"
# MAIL_FROM_NAME="Pixelfed"
# MAIL_USERNAME=null
# MAIL_PASSWORD=null
# MAIL_ENCRYPTION=null
## Databases (MySQL)
DB_CONNECTION=mysql
DB_DATABASE=pixelfed_prod
DB_HOST=db
DB_PASSWORD=pixelfed_db_pass
DB_PORT=3306
DB_USERNAME=pixelfed
# pass the same values to the db itself
MYSQL_DATABASE=pixelfed_prod
MYSQL_PASSWORD=pixelfed_db_pass
MYSQL_RANDOM_ROOT_PASSWORD=true
MYSQL_USER=pixelfed
## Databases (Postgres)
#DB_CONNECTION=pgsql
#DB_HOST=postgres
#DB_PORT=5432
#DB_DATABASE=pixelfed
#DB_USERNAME=postgres
#DB_PASSWORD=postgres
## Cache (Redis)
REDIS_CLIENT=phpredis
REDIS_SCHEME=tcp
REDIS_HOST=redis
REDIS_PASSWORD=redis_password
REDIS_PORT=6379
REDIS_DATABASE=0
## EXPERIMENTS
EXP_LC=false
EXP_REC=false
EXP_LOOPS=false
## ActivityPub Federation
## enable all activity pub interfaces
ACTIVITY_PUB=true
AP_REMOTE_FOLLOW=true
AP_SHAREDINBOX=true
AP_INBOX=true
AP_OUTBOX=true
ATOM_FEEDS=true
NODEINFO=true
WEBFINGER=true
## S3
FILESYSTEM_DRIVER=local
FILESYSTEM_CLOUD=s3
PF_ENABLE_CLOUD=false
#AWS_ACCESS_KEY_ID=
#AWS_SECRET_ACCESS_KEY=
#AWS_DEFAULT_REGION=
#AWS_BUCKET=
#AWS_URL=
#AWS_ENDPOINT=
#AWS_USE_PATH_STYLE_ENDPOINT=false
## Horizon
HORIZON_DARKMODE=true
## COSTAR - Confirm Object Sentiment Transform and Reduce
PF_COSTAR_ENABLED=false
# Media
MEDIA_EXIF_DATABASE=false
## Logging
LOG_CHANNEL=stderr
## Image
IMAGE_DRIVER=imagick
## Broadcasting
BROADCAST_DRIVER=log # log driver for local development
## Cache
CACHE_DRIVER=redis
## Purify
RESTRICT_HTML_TYPES=true
## Queue
QUEUE_DRIVER=redis
## Session
SESSION_DRIVER=redis
## Trusted Proxy
TRUST_PROXIES="*"
## Passport
#PASSPORT_PRIVATE_KEY=
#PASSPORT_PUBLIC_KEY=
## OIDC for logins passed in in env.secrets
# OIDC_CLIENT_ID, OIDC_CLIENT_SECRET
# OIDC provider URL must include realm
# OIDC_PROVIDER_URL=https://login.hackerspace.zone/realms/hackerspace
OIDC_PROVIDER_NAME=oidc

@ -0,0 +1,100 @@
#!/bin/bash
# Set up and start the pixelfed service: on first run, generate secrets and
# register the OIDC client in keycloak, then bootstrap the Laravel app
# (key generation, migrations, caches) inside the running container.
MODULE=pixelfed
die() { echo >&2 "$MODULE: $@" ; exit 1 ; }
info() { echo >&2 "$MODULE: $@" ; }

# run relative to this script (quote "$0" so paths with spaces survive)
DIRNAME="$(dirname "$0")"
cd "$DIRNAME"

source ../env.production || die "no top level env?"
source env.production || die "no local env?"
# smtp settings are optional; without them mail stays unconfigured
source ../env.smtp 2>/dev/null

DATA="../data/$MODULE"
SECRETS="$DATA/env.secrets"

# already configured? just (re)start
if [ -r "$SECRETS" ]; then
	docker compose up -d || die "unable to start"
	exit 0
fi

docker compose down 2>/dev/null

CLIENT_SECRET="$(openssl rand -hex 20)"

mkdir -p "$(dirname "$SECRETS")"

# APP_KEY is deliberately left empty; `php artisan key:generate` below fills
# it in via the mounted .env (env.secrets).
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
APP_KEY=
INSTANCE_DESCRIPTION="${DOMAIN_NAME} pixelfed"
OIDC_CLIENT_ID=$MODULE
OIDC_CLIENT_SECRET=${CLIENT_SECRET}
OIDC_PROVIDER_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}
APP_NAME="${DOMAIN_NAME} Pixelfed"
APP_URL="https://${PIXELFED_HOSTNAME}"
APP_DOMAIN="${PIXELFED_HOSTNAME}"
ADMIN_DOMAIN="${PIXELFED_HOSTNAME}"
SESSION_DOMAIN="${PIXELFED_HOSTNAME}"
EOF

# FIX: this branch previously wrote MAIL_DRIVER=log, which discards all mail
# even when SMTP credentials are configured. Use the smtp driver so the
# credentials actually get used.
if [ -n "$SMTP_SERVER" ]; then
cat <<EOF >> "$SECRETS"
MAIL_DRIVER=smtp
MAIL_HOST=${SMTP_SERVER}
MAIL_PORT=${SMTP_PORT}
MAIL_FROM_ADDRESS="pixelfed@${DOMAIN_NAME}"
MAIL_FROM_NAME="Pixelfed"
MAIL_USERNAME="${SMTP_USER}"
MAIL_PASSWORD="${SMTP_PASSWORD}"
# MAIL_ENCRYPTION=null
EOF
fi

# env.secrets is bind-mounted as /var/www/.env, owned by the container's
# www-data user (assumes the host has a matching www-data account)
chown www-data:www-data "$SECRETS"

# replace any stale keycloak registration with a fresh one
../keycloak/client-delete $MODULE 2>/dev/null
../keycloak/client-create <<EOF || die "unable to create client"
{
"clientId": "$MODULE",
"rootUrl": "https://$PIXELFED_HOSTNAME",
"adminUrl": "https://$PIXELFED_HOSTNAME",
"redirectUris": [ "https://$PIXELFED_HOSTNAME/*" ],
"webOrigins": [ "https://$PIXELFED_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET"
}
EOF

# setup some of the bootstrap and data directories
mkdir -p "$DATA/app-bootstrap/cache" || die "mkdir bootstrap/cache"
cp ./app.php "$DATA/app-bootstrap" || die "cp app.php"
chown -R www-data:www-data "$DATA/app-bootstrap" || die "chown bootstrap"

docker compose up -d || die "unable to start container"

# need to wait for stuff to finish setup
info "Sleeping while stuff starts"
sleep 20

# some of these are to work around docker file weirdness that expects the volume to be prepopulated
#docker compose exec app bash -c "touch .env && chown www-data:www-data .env" || die ".env create"
#docker compose exec app cp -R storage.skel storage || die "storage create"
#docker compose exec -u www-data app composer install --prefer-dist --no-interaction --no-ansi --optimize-autoloader || die "composer install"

docker compose exec -u www-data app php artisan key:generate || die "key:generate"
docker compose exec -u www-data app php artisan storage:link || die "storage:link"
docker compose exec -u www-data app php artisan migrate --force || die "migrate"
#docker compose exec app php artisan import:cities || die "import:cities"
docker compose exec -u www-data app php artisan instance:actor || die "instance:actor"
docker compose exec -u www-data app php artisan passport:keys || die "passport:keys"
docker compose exec -u www-data app php artisan route:cache || die "route:cache"
docker compose exec -u www-data app php artisan view:cache || die "view:cache"
docker compose exec -u www-data app php artisan config:cache || die "config:cache"

# bounce it to reload all of the state
docker compose down || die "unable to bring down"
docker compose up -d || die "unable to restart"

#php artisan route:clear
#php artisan view:clear
#php artisan config:clear

@ -0,0 +1,24 @@
services:
prometheus:
image: prom/prometheus
restart: always
container_name: prometheus
user: root
volumes:
- ../data/prometheus/storage:/prometheus:rw
- ./prometheus.yaml:/etc/prometheus/prometheus.yml:ro
- ./entrypoint.sh:/entrypoint.sh:ro
entrypoint: ["/entrypoint.sh"]
cadvisor:
image: gcr.io/cadvisor/cadvisor:latest
restart: always
container_name: cadvisor
command:
- '--housekeeping_interval=15s'
- '--docker_only=true'
volumes:
- /:/rootfs:ro
- /var/run:/var/run:rw
- /sys:/sys:ro
- /var/lib/docker/:/var/lib/docker:ro

@ -0,0 +1,10 @@
#!/bin/sh -x
chmod 777 /prometheus
exec su -s /bin/sh nobody <<EOF
exec /bin/prometheus \
--config.file=/etc/prometheus/prometheus.yml \
--web.console.libraries=/etc/prometheus/console_libraries \
--web.console.templates=/etc/prometheus/consoles
EOF

@ -0,0 +1,24 @@
# Prometheus scrape configuration for the self-hosted stack.
global:
  scrape_interval: 30s
  external_labels:
    monitor: 'codelab-monitor'

scrape_configs:
  # nginx vts data
  - job_name: 'nginx'
    scrape_interval: 30s
    metrics_path: "/status/format/prometheus"
    static_configs:
      - targets: ['nginx:80']

  - job_name: 'metrics'
    scrape_interval: 30s
    static_configs:
      # grafana data from /metrics
      - targets: ['dashboard:3000']
      # host running the docker compose
      - targets: ['172.17.0.1:9100']

  - job_name: "synapse"
    scrape_interval: 30s
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ["matrix-synapse:9000"]

@ -0,0 +1 @@
works in progress, and broken things go here

@ -0,0 +1,50 @@
# BookStack wiki plus its MariaDB database.
services:
  bookstack:
    image: lscr.io/linuxserver/bookstack:24.02.3
    container_name: bookstack
    env_file:
      - ../env.production
      - env.production
      - ../data/bookstack/env.secrets
    environment:
      - PUID=1000
      - PGID=1000
      - DB_HOST=bookstack_db
      - DB_PORT=3306
      - DB_USER=bookstack
      - DB_DATABASE=bookstackapp
      # NOTE(review): static credentials are acceptable only because the DB
      # is reachable solely over the compose network (see bookstack_db below)
      - DB_PASS=bookstackpass
    volumes:
      - ../data/bookstack/app_config:/config
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "6875:80"
    restart: unless-stopped
    depends_on:
      - bookstack_db
    labels:
      - diun.watch_repo=true
      - diun.include_tags=^24\.\d+\.\d+
      - diun.sort_tags=semver
      - diun.enable=true

  bookstack_db:
    image: lscr.io/linuxserver/mariadb
    container_name: bookstack_db
    env_file:
      - ../env.production
      - env.production
      - ../data/bookstack/env.secrets
    environment:
      - PUID=1000
      - PGID=1000
      - TZ=America/New_York
      - MYSQL_DATABASE=bookstackapp
      - MYSQL_USER=bookstack
      - MYSQL_PASSWORD=bookstackpass
    volumes:
      - ../data/bookstack/db_config:/config
    # FIX: removed `ports: "3306:3306"` — publishing MariaDB on every host
    # interface with a static password exposed the database to the network.
    # The bookstack app reaches it via the compose network (DB_HOST above),
    # so no host port is needed.
    restart: unless-stopped

@ -0,0 +1,8 @@
AUTH_METHOD=oidc
AUTH_AUTO_INITIATE=true
OIDC_NAME=Keycloak
OIDC_DISPLAY_NAME_CLAIMS=name
OIDC_CLIENT_ID=bookstack
OIDC_ISSUER_DISCOVER=true

@ -0,0 +1,65 @@
#!/bin/bash
# Set up and start the bookstack wiki: on first run, generate secrets,
# register the OIDC client in keycloak, then start the compose stack.
die() { echo >&2 "$@" ; exit 1 ; }

# run relative to this script (quote "$0" so paths with spaces survive)
DIRNAME="$(dirname "$0")"
cd "$DIRNAME"

source ../env.production || die "no top level env?"
source env.production || die "no local env?"

DATA="../data/bookstack"
SECRETS="$DATA/env.secrets"

# already configured? just start it
# FIX: error messages said "hedgedoc" — copy-paste from hedgedoc/setup
if [ -r "$SECRETS" ]; then
	docker compose up -d || die "bookstack: unable to start"
	exit 0
fi

docker compose down 2>/dev/null

# regenerate the client secrets
# (the unused SESSION_SECRET from the hedgedoc template has been dropped)
CLIENT_SECRET="$(openssl rand -hex 20)"
MYSQL_ROOT_PASSWORD="$(openssl rand -hex 24)"

# FIX: ensure the data directory exists before writing the secrets file;
# on a fresh checkout the redirect below would otherwise fail
mkdir -p "$DATA" || die "bookstack: unable to create $DATA"

cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
APP_URL=https://${BOOKSTACK_HOSTNAME}
MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
OIDC_ISSUER=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}
OIDC_CLIENT_SECRET=${CLIENT_SECRET}
#OIDC_AUTH_ENDPOINT=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/auth
#OIDC_TOKEN_ENDPOINT=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/token
EOF

# replace any stale keycloak registration with a fresh one
../keycloak/client-delete bookstack
../keycloak/client-create <<EOF || die "unable to create bookstack client"
{
"clientId": "bookstack",
"rootUrl": "https://$BOOKSTACK_HOSTNAME",
"adminUrl": "https://$BOOKSTACK_HOSTNAME",
"redirectUris": [ "https://$BOOKSTACK_HOSTNAME/*" ],
"webOrigins": [ "https://$BOOKSTACK_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET",
"defaultClientScopes": [
"web-origins",
"acr",
"profile",
"roles",
"id",
"email"
],
"optionalClientScopes": [
"address",
"phone",
"offline_access",
"microprofile-jwt"
]
}
EOF

docker compose up -d || die "bookstack: unable to start container"

@ -1 +0,0 @@
# backup this entire folder

@ -0,0 +1,14 @@
DOMAIN_NAME=woodbine.nyc
REALM=basement
KEYCLOAK_HOSTNAME=login.woodbine.nyc
HEDGEDOC_HOSTNAME=basement.woodbine.nyc
MASTODON_HOSTNAME=social.woodbine.nyc
NEXTCLOUD_HOSTNAME=cloud.woodbine.nyc
GITEA_HOSTNAME=git.woodbine.nyc
MATRIX_HOSTNAME=chat.woodbine.nyc
MOBILIZON_HOSTNAME=events.woodbine.nyc
#LIBIB_HOSTNAME=library.woodbine.nyc
BOOKSTACK_HOSTNAME=wiki.woodbine.nyc
#MANYFOLD_HOSTNAME=things.woodbine.nyc
ZULIP_HOSTNAME=library.woodbine.nyc

@ -0,0 +1,10 @@
#
# To enable sending emails, please make an account at SendGrid (free should be fine)
# and copy the API key into the SMTP password. If you don't do this first, you'll
# have to set it up later, which will involve some manual effort to configure each
# sub-site.
#
#
SMTP_SERVER=smtp.sendgrid.net
SMTP_PORT=587
SMTP_USER=apikey
SMTP_PASSWORD=LONG-STRING-GOES-HERE

@ -1,15 +0,0 @@
# copy this to .env and it will be sourced by the appropriate services
# domain your services will be running on
DOMAIN=localhost
# admin user for auth
ADMIN_USER=
ADMIN_PASS=
# used for sending notifications and reset passwords
# only supports smtp+starttls
SMTP_ADDR=
SMTP_PORT=587
SMTP_USER=
SMTP_PASS=

@ -0,0 +1,3 @@
# gitea
OIDC setup is now automated

@ -0,0 +1,40 @@
#!/bin/bash
# gitea/add-ssh-user: create the host-side "git" user that proxies SSH
# traffic into the gitea container (docker-compose maps host 2222 ->
# container 22).  Idempotent: exits early if the user already exists.
die() { echo >&2 "gitea: ERROR $*" ; exit 1 ; }
info() { echo >&2 "gitea: $*" ; }
# a previous run already provisioned everything
if grep -q "^git:" /etc/passwd ; then
info "git user already exists"
exit 0
fi
SSHDIR="/home/git/.ssh"
# uid/gid 2222 must match USER_UID/USER_GID in docker-compose.yml
addgroup --gid 2222 git \
|| die "unable to create git group"
adduser \
--uid 2222 \
--gid 2222 \
--disabled-password \
--gecos "Gitea Proxy User" \
git \
|| die "unable to add git user"
# clean up any partial previous run before regenerating the keypair
rm -f "$SSHDIR/id_rsa" "$SSHDIR/id_rsa.pub" "$SSHDIR/authorized_keys"
# passphrase-less keypair owned by the git user; its own public key is
# authorized so the host git user can ssh into the container's sshd
sudo -u git ssh-keygen \
-t rsa \
-b 4096 \
-C "Gitea Proxy User Key" \
-N "" \
-f "$SSHDIR/id_rsa" \
|| die "unable to create host key"
sudo -u git tee -a "$SSHDIR/authorized_keys" < "$SSHDIR/id_rsa.pub" \
|| die "unable to setup authorized key"
chmod 600 "$SSHDIR/authorized_keys"
# shim executed by sshd for incoming git connections: forwards the
# original git command into the container's sshd on localhost:2222
cat <<"EOF" > "/usr/local/bin/gitea"
#!/bin/sh
ssh -p 2222 -o StrictHostKeyChecking=no git@127.0.0.1 "SSH_ORIGINAL_COMMAND=\"$SSH_ORIGINAL_COMMAND\" $0 $@"
EOF
chmod +x "/usr/local/bin/gitea"

@ -0,0 +1,46 @@
# gitea/docker-compose.yml: Forgejo (gitea fork) plus its postgres database.
networks:
  gitea:
    external: false
services:
  gitea:
    image: codeberg.org/forgejo/forgejo:1.18.0-1
    env_file:
      - ../env.production
      - env.production
      - ../data/gitea/env.secrets
    environment:
      - USER_UID=2222 # must match git user on host system
      - USER_GID=2222
      # database credentials match the db service below; the database is
      # only reachable on the private "gitea" network
      - GITEA__database__DB_TYPE=postgres
      - GITEA__database__HOST=db:5432
      - GITEA__database__NAME=gitea
      - GITEA__database__USER=gitea
      - GITEA__database__PASSWD=gitea
    networks:
      - gitea
    volumes:
      - ../data/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
      # share the host git proxy user's keys (see ./add-ssh-user)
      - /home/git/.ssh/:/data/git/.ssh
    ports:
      # web UI on host 3030, ssh on host 2222 (proxied by /usr/local/bin/gitea)
      - "3030:3000"
      - "2222:22"
    restart: always
    depends_on:
      - db
    labels:
      - "diun.enable=true"
  db:
    image: postgres:13.4-alpine
    restart: always
    environment:
      - POSTGRES_USER=gitea
      - POSTGRES_PASSWORD=gitea
      - POSTGRES_DB=gitea
    volumes:
      - ../data/gitea/postgres:/var/lib/postgresql/data
    networks:
      - gitea

@ -0,0 +1,7 @@
# gitea config for keycloak integration
# only allow open id sign-in, turn off all other registrations
GITEA__openid__ENABLE_OPENID_SIGNIN=true
GITEA__openid__ENABLE_OPENID_SIGNUP=false
#GITEA__service__DISABLE_REGISTRATION=true
# new accounts may only be created through the external (keycloak) provider
GITEA__service__ALLOW_ONLY_EXTERNAL_REGISTRATION=true
# default branch name for newly created repositories
GITEA__repository__DEFAULT_BRANCH=main

@ -0,0 +1,69 @@
#!/bin/bash
# gitea/setup: one-shot initialization of the Forgejo/gitea service.
# First run: creates the host git proxy user, generates secrets,
# registers an OIDC client in keycloak and wires gitea to it.
# Later runs (secrets already present): just (re)start the stack.
die() { echo >&2 "gitea: ERROR $*" ; exit 1 ; }
info() { echo >&2 "gitea: $*" ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level environment"
source ./env.production || die "no local environment"
DATA="../data/gitea"
SECRETS="$DATA/env.secrets"
# fast path: secrets were generated by a previous run
if [ -r "$SECRETS" ]; then
docker compose up -d || die "unable to start"
exit 0
fi
./add-ssh-user || die "unable to add ssh user"
GITEA_CLIENT_SECRET="$(openssl rand -hex 32)"
GITEA_ADMIN_PASSWORD="$(openssl rand -hex 8)"
info "creating new secrets $SECRETS"
mkdir -p "$DATA"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
GITEA_CLIENT_SECRET=$GITEA_CLIENT_SECRET
GITEA_ADMIN_PASSWORD=$GITEA_ADMIN_PASSWORD
GITEA__server__ROOT_URL=https://$GITEA_HOSTNAME/
GITEA__server__SSH_DOMAIN=$GITEA_HOSTNAME
GITEA__security__INSTALL_LOCK=true
GITEA__security__SECRET_KEY=$(openssl rand -hex 32)
EOF
docker compose down 2>/dev/null
# re-register the OIDC client with the fresh secret; the delete may
# fail harmlessly on the very first run
../keycloak/client-delete gitea 2>/dev/null
../keycloak/client-create <<EOF || die "unable to create gitea client"
{
	"clientId": "gitea",
	"rootUrl": "https://$GITEA_HOSTNAME",
	"adminUrl": "https://$GITEA_HOSTNAME",
	"redirectUris": [ "https://$GITEA_HOSTNAME/*" ],
	"webOrigins": [ "https://$GITEA_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$GITEA_CLIENT_SECRET"
}
EOF
docker compose up -d || die "unable to start container"
info "waiting for startup..."
sleep 5
info "adding oauth login"
# point gitea at keycloak's OIDC discovery document; the "admin" group
# claim grants gitea admin rights
docker compose exec -u git gitea \
gitea admin auth add-oauth \
--name "keycloak" \
--provider "openidConnect" \
--key "gitea" \
--secret "$GITEA_CLIENT_SECRET" \
--auto-discover-url "https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/.well-known/openid-configuration" \
--group-claim-name "groups" \
--admin-group "admin" \
|| die "unable to add oauth interface"

@ -0,0 +1,36 @@
# hedgedoc/docker-compose.yml: hedgedoc collaborative editor + postgres.
services:
  database:
    image: postgres:13.4-alpine
    environment:
      # credentials match CMD_DB_URL in the hedgedoc service below; the
      # database publishes no ports, so it is only reachable from
      # containers on the compose default network
      - POSTGRES_USER=hedgedoc
      - POSTGRES_PASSWORD=password
      - POSTGRES_DB=hedgedoc
    volumes:
      - ../data/hedgedoc/database:/var/lib/postgresql/data
    restart: always
  hedgedoc:
    # Make sure to use the latest release from https://hedgedoc.org/latest-release
    image: quay.io/hedgedoc/hedgedoc:1.9.9
    env_file:
      # later files override earlier ones; env.secrets is generated by ./setup
      - ../env.production
      - env.production
      - ../data/hedgedoc/env.secrets
    environment:
      #- CMD_CSP_ENABLE=false
      - CMD_DB_URL=postgres://hedgedoc:password@database:5432/hedgedoc
      - CMD_PROTOCOL_USESSL=true
      - CMD_ALLOW_ANONYMOUS=false # anonymous user's can't create notes
      - CMD_ALLOW_ANONYMOUS_EDITS=true # but they can be invited to edit notes
      - CMD_ALLOW_FREEURL=true # users can create arbitrary names
      - CMD_EMAIL=false # only oauth logins
      - UPLOADS_MODE=666
      # DOMAIN and OAUTH2 variables are now in env.secret
    volumes:
      - ../data/hedgedoc/uploads:/hedgedoc/public/uploads
    ports:
      - "3000:3000"
    restart: always
    depends_on:
      - database
    labels:
      - "diun.enable=true"

@ -0,0 +1,2 @@
# Placeholder values only: ./setup writes the real secrets into
# ../data/hedgedoc/env.secrets, which docker compose loads after this
# file and which therefore overrides these.
CMD_OAUTH2_CLIENT_SECRET=abcdef1234
CMD_SESSION_SECRET=abcdef1234

@ -0,0 +1,69 @@
#!/bin/bash
# hedgedoc/setup: first-run initialization for the hedgedoc service.
# Generates the OAuth2/session secrets, registers the keycloak client
# and starts the containers; later runs just restart the stack.
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
DATA="../data/hedgedoc"
SECRETS="$DATA/env.secrets"
# fast path: secrets already exist from a previous run
if [ -r "$SECRETS" ]; then
docker compose up -d || die "hedgedoc: unable to start"
exit 0
fi
docker compose down 2>/dev/null
# regenerate the client secrets
CLIENT_SECRET="$(openssl rand -hex 20)"
SESSION_SECRET="$(openssl rand -hex 20)"
# the container user writes uploads here; directories need the execute
# (search) bit, so 777 — the previous 666 left the directory
# untraversable and uploads would fail
mkdir -p "$DATA/uploads"
chmod 777 "$DATA/uploads"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
CMD_OAUTH2_CLIENT_SECRET=$CLIENT_SECRET
CMD_SESSION_SECRET=$SESSION_SECRET
CMD_DOMAIN=${HEDGEDOC_HOSTNAME}
CMD_OAUTH2_AUTHORIZATION_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/auth
CMD_OAUTH2_TOKEN_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/token
CMD_OAUTH2_USER_PROFILE_URL=https://${KEYCLOAK_HOSTNAME}/realms/${REALM}/protocol/openid-connect/userinfo
CMD_OAUTH2_USER_PROFILE_USERNAME_ATTR=preferred_username
CMD_OAUTH2_USER_PROFILE_DISPLAY_NAME_ATTR=name
CMD_OAUTH2_USER_PROFILE_EMAIL_ATTR=email
CMD_OAUTH2_CLIENT_ID=hedgedoc
CMD_OAUTH2_PROVIDERNAME=Keycloak
EOF
# re-register the OIDC client with the fresh secret; the delete may
# fail harmlessly on the very first run
../keycloak/client-delete hedgedoc 2>/dev/null
../keycloak/client-create <<EOF || die "unable to create hedgedoc client"
{
	"clientId": "hedgedoc",
	"rootUrl": "https://$HEDGEDOC_HOSTNAME",
	"adminUrl": "https://$HEDGEDOC_HOSTNAME",
	"redirectUris": [ "https://$HEDGEDOC_HOSTNAME/*" ],
	"webOrigins": [ "https://$HEDGEDOC_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$CLIENT_SECRET",
	"defaultClientScopes": [
		"web-origins",
		"acr",
		"profile",
		"roles",
		"id",
		"email"
	],
	"optionalClientScopes": [
		"address",
		"phone",
		"offline_access",
		"microprofile-jwt"
	]
}
EOF
docker compose up -d || die "hedgedoc: unable to start container"

@ -0,0 +1,53 @@
<!DOCTYPE html>
<html>
<head>
<meta charset='utf-8' />
<!-- NOTE(review): CSP source expressions do not quote "*"; the previous
     frame-src '*' was an invalid source and ignored by browsers. -->
<meta http-equiv="Content-Security-Policy" content="frame-src *">
<title>
Hackerspace.Zone calendar viewer
</title>
<style>
html, body {
margin: 0;
padding: 0;
font-family: Arial, Helvetica Neue, Helvetica, sans-serif;
font-size: 14px;
}
#calendar {
max-width: 800px;
margin: 0 auto; /* was "00px auto" */
}
</style>
<link href='https://cdn.jsdelivr.net/npm/fullcalendar@5.11.0/main.min.css' rel='stylesheet' />
<script src='https://cdn.jsdelivr.net/npm/fullcalendar@5.11.0/main.min.js'></script>
<!-- NOTE(review): ical.js is loaded from a GitHub release asset rather than
     a CDN; confirm it is served with a JavaScript content type. -->
<script src='https://github.com/mozilla-comm/ical.js/releases/download/v1.4.0/ical.js'></script>
<script src='https://cdn.jsdelivr.net/npm/@fullcalendar/icalendar@5.11.0/main.global.min.js'></script>
<script>
// Feed and initial view may be overridden via ?ics=...&view=... parameters.
// 'https://events.hackerspace.zone/@events/feed/ics',
const param = new URL(location).searchParams;
const ics = param.get("ics") || 'https://events.hackerspace.zone/@events/feed/ics';
const view = param.get("view") || "dayGridMonth";
document.addEventListener('DOMContentLoaded', function() {
var calendarEl = document.getElementById('calendar');
var calendar = new FullCalendar.Calendar(calendarEl, {
initialView: view,
headerToolbar: {
left: 'prev,next today',
center: 'title',
right: 'dayGridMonth,timeGridWeek,timeGridDay'
},
events: {
url: ics,
format: 'ics'
}
});
calendar.render();
});
</script>
</head>
<body>
<div id='calendar'></div>
</body>
</html>

@ -0,0 +1,47 @@
<h1>hackerspace.zone</h1>
An easy to install set of self-hosted, single-sign-on, open-source services.
<ul>
<li><a href="https://matrix.hackerspace.zone/">matrix</a>: realtime chat
<li><a href="https://docs.hackerspace.zone/">hedgedoc</a>: collaborative markdown editing
<li><a href="https://social.hackerspace.zone/">mastodon</a>: federated social media
<li><a href="https://events.hackerspace.zone/">mobilizon</a>: event planning and RSVP
<li><a href="https://cloud.hackerspace.zone/">nextcloud</a>: self hosted documents and calendaring
<li><a href="https://dashboard.hackerspace.zone/">grafana</a>: dashboards and statistic collection
<li><a href="https://git.hackerspace.zone/">gitea</a>: git repository hosting
<li><a href="https://login.hackerspace.zone/">keycloak</a>: user management and single sign on for the domain
</ul>
<h2>Upcoming events</h2>
<iframe src="calview.html?ics=https://events.hackerspace.zone/@events/feed/ics" width=600 height=600 style="border:0"></iframe>
<h2>Source code</h2>
Source code for building the environments and configuring them:
<a href="https://github.com/osresearch/hackerspace-zone"><tt>github.com/osresearch/hackerspace-zone</tt></a>
There is <a href="https://github.com/osresearch/hackerspace-zone/issues/1">probably a better way to do this</a>.
<hr>
<h2>Useful links</h2>
<ul>
<li> <a href="https://login.hackerspace.zone/realms/hackerspace/login/">keycloak user account</a>
<li> <a href="https://login.hackerspace.zone/admin/hackerspace/console/">keycloak realm management</a>
<li> <a href="https://social.hackerspace.zone/admin/dashboard">mastodon admin dashboard</a>
<li> <a href="https://social.hackerspace.zone/admin/reports">mastodon moderation dashboard</a>
</ul>
<h2>Notes</h2>
<!-- converted from raw markdown, which does not render inside HTML -->
<ul>
<li> Mastodon doesn't set roles via SSO (<a href="https://github.com/mastodon/mastodon/issues/18335">issue 18335</a>). To make a user admin from the command line as <tt>root</tt> requires the username:
<pre>
cd mastodon
docker compose exec -T mastodon \
	bin/tootctl accounts modify USERNAME --role admin
</pre>
<li> Mobilizon doesn't set roles via SSO (<a href="https://framagit.org/framasoft/mobilizon/-/issues/1105">issue 1105</a>, on roadmap for 2.2). To make a user admin from the command line as <tt>root</tt> requires the registered email address:
<pre>
cd mobilizon
docker compose exec -T mobilizon \
	bin/mobilizon_ctl users.modify EMAIL --admin
</pre>
</ul>

@ -0,0 +1,7 @@
# Keycloak
Keycloak is the single-sign-on user authentication provider.
You must set the `KEYCLOAK_ADMIN_PASSWORD` in the `env.secrets` file.
This is the most important secret: it allows user accounts to be created
for all the other services.

@ -0,0 +1,20 @@
#!/bin/bash
# keycloak/client-create: register an OIDC client in the realm.
# Reads the client JSON definition on stdin and passes it to kcadm.
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
source "../data/keycloak/env.secrets" || die "no local secrets?"
# -T: no tty, so stdin (the JSON definition, via -f -) flows through;
# authenticate against the master realm, create in "$REALM"
docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
create clients \
--server http://localhost:8080/ \
--user admin \
--realm master \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
-r "$REALM" \
-f - \
|| die "create client failed"

@ -0,0 +1,40 @@
#!/bin/bash
# keycloak/client-delete: remove an OIDC client from the realm by its
# clientId (usage: client-delete clientName).
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
source "../data/keycloak/env.secrets" || die "no local secrets?"
# try to get the clients by name
CLIENT_NAME="$1"
if [ -z "$CLIENT_NAME" ]; then
die "usage: $0 clientName"
fi
# kcadm only deletes by internal id, so look it up from the clientId
CLIENT_ID="$(docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
get clients \
--server http://localhost:8080/ \
--user admin \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
--realm master \
-r "$REALM" \
| jq -r ".[] | select( .clientId == \"$CLIENT_NAME\" ).id")"
if [ -z "$CLIENT_ID" ]; then
die "$CLIENT_NAME: no such client"
fi
echo "$0: $CLIENT_NAME = $CLIENT_ID"
docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
delete "clients/$CLIENT_ID" \
--server http://localhost:8080/ \
--user admin \
--realm master \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
-r "$REALM" \
|| die "$CLIENT_NAME($CLIENT_ID): unable to remove"

@ -0,0 +1,43 @@
# keycloak/docker-compose.yml: keycloak SSO server plus its mysql database.
volumes:
  # NOTE(review): this named volume is declared but unreferenced below
  # (mysql uses a bind mount) — confirm whether it can be removed.
  mysql_data:
    driver: local
services:
  mysql:
    image: mysql:5.7
    restart: always
    volumes:
      - ../data/keycloak/database:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: keycloak
      MYSQL_USER: keycloak
      MYSQL_PASSWORD: password
  keycloak:
    image: quay.io/keycloak/keycloak:18.0.0
    restart: always
    # $$ escapes the variable so compose defers expansion to the container
    entrypoint: /opt/keycloak/bin/kc.sh start --hostname="$${KEYCLOAK_HOSTNAME}" --proxy=edge
    user: "0:0" # otherwise the persistent data directory is not writable
    env_file:
      - ../env.production
      - env.production
      - ../data/keycloak/env.secrets
    environment:
      DB_VENDOR: MYSQL
      DB_ADDR: mysql
      DB_DATABASE: keycloak
      DB_USER: keycloak
      DB_PASSWORD: password
      KEYCLOAK_ADMIN: admin
      # KEYCLOAK_ADMIN_PASSWORD should be set in env.secrets
      PROXY_ADDRESS_FORWARDING: 'true'
    volumes:
      - ../data/keycloak/certs:/etc/x509/https
      - ../data/keycloak/keycloak:/opt/keycloak/data
    ports:
      # quoted: unquoted digit:digit mappings hit YAML 1.1 sexagesimal parsing
      - "8080:8080"
    depends_on:
      - mysql
    labels:
      - "diun.enable=true"

@ -0,0 +1,119 @@
#!/bin/bash
# keycloak/setup: first-run initialization of the keycloak SSO server.
# Generates the admin password, creates the realm, adds the "id" client
# scope, optionally configures SMTP, and creates a realm admin user.
# Later runs (secrets already present) just restart the container.
die() { echo >&2 "keycloak: ERROR: $@" ; exit 1 ; }
info() { echo >&2 "keycloak: $@" ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
source ./env.production || die "no local env?"
# smtp config is optional; silence the error if it does not exist
source "../env.smtp" 2>/dev/null
SECRETS="../data/keycloak/env.secrets"
if [ -r "$SECRETS" ]; then
docker compose up -d || die "keycloak: unable to start container"
exit 0
fi
docker compose down 2>/dev/null
KEYCLOAK_ADMIN_PASSWORD="$(openssl rand -hex 8)"
echo "Keycloak admin password $KEYCLOAK_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
KEYCLOAK_ADMIN_PASSWORD=$KEYCLOAK_ADMIN_PASSWORD
EOF
docker compose up -d || die "unable to start keycloak"
echo "sleeping 30 seconds while keycloak initializes..."
sleep 30
info "logging into server"
docker compose exec keycloak \
/opt/keycloak/bin/kcadm.sh \
config credentials \
--server http://localhost:8080/ \
--user admin \
--password "$KEYCLOAK_ADMIN_PASSWORD" \
--realm master \
|| die "unable to login"
info "Create a new realm for '$REALM'"
docker compose exec keycloak \
/opt/keycloak/bin/kcadm.sh \
create realms \
-s "realm=$REALM" \
-s enabled=true \
|| die "unable to create realm"
# https://github.com/hedgedoc/hedgedoc/issues/56
info "Fix up a id bug"
docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
create client-scopes \
-r "$REALM" \
-f - <<EOF || die "unable to create mapping"
{
	"name": "id",
	"protocol": "openid-connect",
	"attributes": {
		"include.in.token.scope": "true",
		"display.on.consent.screen": "true"
	},
	"protocolMappers": [
		{
			"name": "id",
			"protocol": "openid-connect",
			"protocolMapper": "oidc-usermodel-property-mapper",
			"consentRequired": false,
			"config": {
				"user.attribute": "id",
				"id.token.claim": "true",
				"access.token.claim": "true",
				"jsonType.label": "String",
				"userinfo.token.claim": "true"
			}
		}
	]
}
EOF
if [ -n "$SMTP_SERVER" ]; then
info "configuring email"
docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh update "realms/$REALM" \
-f - <<EOF || die "unable to configure email"
{
	"resetPasswordAllowed": "true",
	"smtpServer" : {
		"auth" : "true",
		"starttls" : "true",
		"user" : "$SMTP_USER",
		"password" : "$SMTP_PASSWORD",
		"port" : "$SMTP_PORT",
		"host" : "$SMTP_SERVER",
		"from" : "keycloak@$DOMAIN_NAME",
		"fromDisplayName" : "Keycloak @ $DOMAIN_NAME",
		"ssl" : "false"
	}
}
EOF
fi
info "Create an admin user in realm"
# The credential "type" must be the literal "password" and "value" the
# actual secret — the original had the two fields swapped, which created
# an unusable credential.  The generated admin password is assumed to be
# the intended value here.
docker compose exec -T keycloak \
/opt/keycloak/bin/kcadm.sh \
create users \
-o \
--fields id,username \
-r "$REALM" \
-s username=admin \
-s enabled=true \
-s 'credentials=[{"type":"password","value":"'$KEYCLOAK_ADMIN_PASSWORD'","temporary":false}]' \
|| die "$REALM: unable to create admin user"

@ -0,0 +1,4 @@
# Mastodon
This is the vanilla version with Elastic Search and Single-Sign-On enabled.
No other user accounts are allowed to join.

@ -0,0 +1,132 @@
# mastodon/docker-compose.yml: web, streaming and sidekiq workers plus
# postgres, redis and elasticsearch backing services.
services:
  database:
    image: postgres:13.4-alpine
    restart: always
    #shm_size: 256mb
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'pg_isready', '-U', "mastodon", "-d", "mastodon_production"]
    volumes:
      - ../data/mastodon/database:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mastodon
      - POSTGRES_PASSWORD=mastodon
      #- POSTGRES_DB=mastodon_production
  redis:
    image: redis:6-alpine
    restart: always
    networks:
      - internal_network
    healthcheck:
      test: ['CMD', 'redis-cli', 'ping']
    volumes:
      - ../data/mastodon/redis:/data
  es:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    restart: always
    environment:
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "cluster.name=es-mastodon"
      - "discovery.type=single-node"
      - "bootstrap.memory_lock=true"
    networks:
      - internal_network
    healthcheck:
      test: ["CMD-SHELL", "curl --silent --fail localhost:9200/_cluster/health || exit 1"]
    volumes:
      - ../data/mastodon/elasticsearch:/usr/share/elasticsearch/data
    # fixup the permissions on the data directory since they are created as root on host
    entrypoint: /bin/sh -c "chown -R elasticsearch:elasticsearch data && /usr/local/bin/docker-entrypoint.sh eswrapper"
    ulimits:
      memlock:
        soft: -1
        hard: -1
  mastodon:
    # NOTE(review): no image tag — pulls whatever "latest" is, so the three
    # mastodon services can drift apart across pulls; consider pinning.
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    # remove a stale pid left over from an unclean shutdown before starting
    command: bash -c "rm -f /mastodon/tmp/pids/server.pid; bundle exec rails s -p 6001"
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:6001/health || exit 1']
    ports:
      - '6001:6001'
    depends_on:
      - database
      - redis
      - es
    volumes:
      - ../data/mastodon/system:/mastodon/public/system
    labels:
      - "diun.enable=true"
  streaming:
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    command: node ./streaming
    networks:
      - external_network
      - internal_network
    healthcheck:
      # prettier-ignore
      test: ['CMD-SHELL', 'wget -q --spider --proxy=off localhost:4000/api/v1/streaming/health || exit 1']
    ports:
      - '4000:4000'
    depends_on:
      - database
      - redis
  sidekiq:
    image: tootsuite/mastodon
    restart: always
    env_file:
      - ../env.production
      - env.production
      - ../data/mastodon/env.secrets
    command: bundle exec sidekiq
    depends_on:
      - database
      - redis
    networks:
      - external_network
      - internal_network
    volumes:
      - ../data/mastodon/system:/mastodon/public/system
    healthcheck:
      test: ['CMD-SHELL', "ps aux | grep '[s]idekiq\ 6' || false"]
## Uncomment to enable federation with tor instances along with adding the following ENV variables
## http_proxy=http://privoxy:8118
## ALLOW_ACCESS_TO_HIDDEN_SERVICE=true
#  tor:
#    image: sirboops/tor
#    networks:
#      - external_network
#      - internal_network
#
#  privoxy:
#    image: sirboops/privoxy
#    volumes:
#      - ./priv-config:/opt/config
#    networks:
#      - external_network
#      - internal_network
networks:
  external_network:
  # internal: true blocks outbound access; only the app services bridge both
  internal_network:
    internal: true

@ -0,0 +1,82 @@
# This is a sample configuration file. You can generate your configuration
# with the `rake mastodon:setup` interactive setup wizard, but to customize
# your setup even further, you'll need to edit it manually. This sample does
# not demonstrate all available configuration options. Please look at
# https://docs.joinmastodon.org/admin/config/ for the full documentation.
# Note that this file accepts slightly different syntax depending on whether
# you are using `docker compose` or not. In particular, if you use
# `docker compose`, the value of each declared variable will be taken verbatim,
# including surrounding quotes.
# See: https://github.com/mastodon/mastodon/issues/16895
# Federation
# ----------
# This identifies your server and cannot be changed safely later
# ----------
# LOCAL_DOMAIN is set in env.secrets
#WEB_DOMAIN=social.example.com
# Redis
# -----
REDIS_HOST=redis
REDIS_PORT=6379
# PostgreSQL
# ----------
DB_HOST=database
DB_USER=mastodon
DB_NAME=mastodon_production
DB_PASS=mastodon
DB_PORT=5432
# Elasticsearch (optional)
# ------------------------
ES_ENABLED=true
ES_HOST=es
ES_PORT=9200
# Authentication for ES (optional)
ES_USER=elastic
ES_PASS=password
# Secrets
# -------
# Make sure to use `rake secret` to generate secrets
# -------
# written to env.secrets
#SECRET_KEY_BASE=abcdef1234
#OTP_SECRET=99991234
# Web Push
# --------
# Generate with `rake mastodon:webpush:generate_vapid_key`
# --------
# written to env.secrets
#VAPID_PRIVATE_KEY=
#VAPID_PUBLIC_KEY=
# Sending mail
# ------------
# configured in env.secrets
# File storage (optional)
# -----------------------
#S3_ENABLED=true
#S3_BUCKET=files.example.com
#AWS_ACCESS_KEY_ID=
#AWS_SECRET_ACCESS_KEY=
#S3_ALIAS_HOST=files.example.com
# do not allow normal logins
OMNIAUTH_ONLY=true
# OIDC supported since https://github.com/mastodon/mastodon/pull/16221
OIDC_ENABLED=true
OIDC_PROMPT=Keycloak
OIDC_DISCOVERY=true
OIDC_SCOPE=openid,profile
OIDC_UID_FIELD=preferred_username
OIDC_CLIENT_ID=mastodon
OIDC_SECURITY_ASSUME_EMAIL_IS_VERIFIED=true
# OIDC URLs are in env.secrets since they require env expansion
# OIDC_CLIENT_SECRET is in env.secrets

@ -0,0 +1,78 @@
#!/bin/bash
# mastodon/setup: first-run initialization of the mastodon service.
# Generates secrets and vapid keys, initializes the database, registers
# the keycloak OIDC client; later runs just restart the stack.
die() { echo >&2 "ERROR: $@" ; exit 1 ; }
info() { echo >&2 "$@" ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
source ./env.production || die "no local env?"
# smtp config is optional; silence the error if it does not exist
source "../env.smtp" 2>/dev/null
# world-writable so the container user can store media attachments
mkdir -p ../data/mastodon/system
chmod 777 ../data/mastodon/system
SECRETS="../data/mastodon/env.secrets"
if [ -r "$SECRETS" ]; then
docker compose up -d || die "unable to restart mastodon"
exit 0
fi
# have to bring it all down before we touch the files
docker compose down
OIDC_CLIENT_SECRET="$(openssl rand -hex 32)"
# create the secrets file,
# along with some parameters that should be in the environment
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
WEB_DOMAIN=$MASTODON_HOSTNAME
LOCAL_DOMAIN=$DOMAIN_NAME
OIDC_DISPLAY_NAME=$REALM
OIDC_ISSUER=https://$KEYCLOAK_HOSTNAME/realms/$REALM
OIDC_REDIRECT_URI=https://$MASTODON_HOSTNAME/auth/auth/openid_connect/callback
OIDC_CLIENT_SECRET=$OIDC_CLIENT_SECRET
SECRET_KEY_BASE=$(openssl rand -hex 32)
OTP_SECRET=$(openssl rand -hex 32)
EOF
if [ -n "$SMTP_SERVER" ]; then
cat <<EOF >> "$SECRETS"
SMTP_SERVER=$SMTP_SERVER
SMTP_PORT=$SMTP_PORT
SMTP_LOGIN=$SMTP_USER
SMTP_PASSWORD=$SMTP_PASSWORD
SMTP_FROM_ADDRESS=mastodon@$DOMAIN_NAME
EOF
fi
info "mastodon: creating push keys"
# the rake task prints VAPID_PRIVATE_KEY/VAPID_PUBLIC_KEY lines, which
# are appended directly to the secrets file
docker compose run --rm mastodon \
rails mastodon:webpush:generate_vapid_key \
>> "$SECRETS" \
|| die "unable to generate vapid key"
info "mastodon: setting up database"
docker compose run --rm mastodon \
rails db:setup \
|| die "unable to setup database"
source "$SECRETS"
info "mastodon: creating keycloak interface"
../keycloak/client-delete mastodon
../keycloak/client-create <<EOF || die "Unable to create keycloak client"
{
	"clientId": "mastodon",
	"rootUrl": "https://$MASTODON_HOSTNAME/",
	"adminUrl": "https://$MASTODON_HOSTNAME/",
	"redirectUris": [ "https://$MASTODON_HOSTNAME/*" ],
	"webOrigins": [ "https://$MASTODON_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$OIDC_CLIENT_SECRET"
}
EOF
docker compose up -d || die "mastodon: unable to start container"

@ -0,0 +1,31 @@
# matrix/docker-compose.yml: synapse homeserver, element web client and
# a postgres database.
services:
  postgres:
    image: postgres:13.4-alpine
    restart: unless-stopped
    volumes:
      - ../data/matrix/postgresdata:/var/lib/postgresql/data
      - ../data/matrix/media_store:/data/media_store
    environment:
      - POSTGRES_DB=synapse
      - POSTGRES_USER=synapse
      # NOTE(review): placeholder password, and synapse below declares no
      # depends_on or database configuration pointing at this service —
      # verify the postgres service is actually wired up and used.
      - POSTGRES_PASSWORD=STRONGPASSWORD
  element:
    image: vectorim/element-web:v1.11.91
    restart: unless-stopped
    volumes:
      # generated from element-config.json.template by ./setup
      - ../data/matrix/element-config.json:/app/config.json
    ports:
      - "5000:80"
    labels:
      - "diun.enable=true"
  synapse:
    image: ghcr.io/element-hq/synapse:v1.123.0
    restart: unless-stopped
    volumes:
      # holds homeserver.yaml, generated and appended to by ./setup
      - ../data/matrix/synapse:/data
    ports:
      - "5008:8008"
    labels:
      - "diun.enable=true"

@ -0,0 +1,73 @@
{
"default_server_config": {
"m.homeserver": {
"base_url": "https://${MATRIX_HOSTNAME}",
"server_name": "${DOMAIN_NAME}"
},
"m.identity_server": {
"base_url": "https://vector.im"
}
},
"brand": "Element",
"integrations_ui_url": "https://scalar.vector.im/",
"integrations_rest_url": "https://scalar.vector.im/api",
"integrations_widgets_urls": [
"https://scalar.vector.im/_matrix/integrations/v1",
"https://scalar.vector.im/api",
"https://scalar-staging.vector.im/_matrix/integrations/v1",
"https://scalar-staging.vector.im/api",
"https://scalar-staging.riot.im/scalar/api"
],
"hosting_signup_link": "https://element.io/matrix-services?utm_source=element-web&utm_medium=web",
"bug_report_endpoint_url": "https://element.io/bugreports/submit",
"uisi_autorageshake_app": "element-auto-uisi",
"showLabsSettings": true,
"piwik": {
"url": "https://piwik.riot.im/",
"siteId": 1,
"policyUrl": "https://element.io/cookie-policy"
},
"roomDirectory": {
"servers": [
"matrix.org",
"gitter.im",
"libera.chat"
]
},
"enable_presence_by_hs_url": {
"https://matrix.org": false,
"https://matrix-client.matrix.org": false
},
"terms_and_conditions_links": [
{
"url": "https://element.io/privacy",
"text": "Privacy Policy"
},
{
"url": "https://element.io/cookie-policy",
"text": "Cookie Policy"
}
],
"hostSignup": {
"brand": "Element Home",
"cookiePolicyUrl": "https://element.io/cookie-policy",
"domains": [
"matrix.org"
],
"privacyPolicyUrl": "https://element.io/privacy",
"termsOfServiceUrl": "https://element.io/terms-of-service",
"url": "https://ems.element.io/element-home/in-app-loader"
},
"sentry": {
"dsn": "https://029a0eb289f942508ae0fb17935bd8c5@sentry.matrix.org/6",
"environment": "develop"
},
"posthog": {
"projectApiKey": "phc_Jzsm6DTm6V2705zeU5dcNvQDlonOR68XvX2sh1sEOHO",
"apiHost": "https://posthog.element.io"
},
"features": {
"feature_spotlight": true
},
"map_style_url": "https://api.maptiler.com/maps/streets/style.json?key=fU3vlMsMn4Jb6dnEIFsx"
}

@ -0,0 +1 @@
# variables

@ -0,0 +1,97 @@
#!/bin/bash
# matrix/setup: first-run initialization of the synapse homeserver and
# element web client.  Generates homeserver.yaml, appends OIDC (and
# optionally SMTP) config, registers the keycloak client and starts the
# containers; later runs just restart the stack.
die() { echo >&2 "matrix: ERROR $@" ; exit 1 ; }
info() { echo >&2 "matrix: $@" ; }
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"
source ../env.production || die "no top level env?"
# smtp config is optional; silence the error if it does not exist
source ../env.smtp 2>/dev/null
source env.production || die "no local env?"
DATA="../data/matrix"
SYNAPSE_DIR="$DATA/synapse"
HOMESERVER_YAML="$SYNAPSE_DIR/homeserver.yaml"
# fast path: config was generated by a previous run
if [ -r "$HOMESERVER_YAML" ]; then
docker compose up -d || die "matrix: unable to restart"
exit 0
fi
docker compose down 2>/dev/null
mkdir -p "$DATA"
# fix up the Element client config to have the correct hostname
# based on the environment variables
export DOMAIN_NAME MATRIX_HOSTNAME
envsubst < "element-config.json.template" > "$DATA/element-config.json"
# This will create a *delegated* matrix server,
# where the "servername" is just the top level domain,
# but it is hosted on "matrix.DOMAIN_NAME".
# the syntax here is confusing and it is not clear in
# the docs *which* have to be updated.
docker compose run \
--rm \
-e SYNAPSE_SERVER_NAME="$DOMAIN_NAME" \
-e SYNAPSE_REPORT_STATS="no" \
synapse generate \
|| die "unable to generate synapse config"
MATRIX_CLIENT_SECRET="$(openssl rand -hex 20)"
#MAS_CLIENT_SECRET="$(openssl rand -hex 20)"
#MAS_ADMIN_TOKEN="$(openssl rand -hex 20)"
#export MAS_CLIENT_SECRET MAS_ADMIN_TOKEN
#export MAS_PROVIDER_ULID # from env.production
#export DOMAIN_NAME
#mkdir -p "$DATA/mas"
#envsubst < "mas-config.template" > "$DATA/mas/config"
# append our OIDC login config to the generated homeserver.yaml
cat <<EOF >> "$HOMESERVER_YAML"
web_client_location: https://${MATRIX_HOSTNAME}/
public_baseurl: https://${MATRIX_HOSTNAME}/
enable_registration: false
oidc_providers:
  - idp_id: keycloak
    idp_name: "KeyCloak"
    issuer: "https://${KEYCLOAK_HOSTNAME}/realms/${REALM}"
    client_id: "synapse"
    client_secret: "${MATRIX_CLIENT_SECRET}"
    scopes: ["openid", "profile"]
    user_mapping_provider:
      config:
        localpart_template: "{{ user.preferred_username }}"
        display_name_template: "{{ user.name }}"
EOF
if [ -n "$SMTP_SERVER" ]; then
info "configuring email"
cat <<EOF >> "$HOMESERVER_YAML"
email:
  smtp_host: ${SMTP_SERVER}
  smtp_port: ${SMTP_PORT}
  smtp_user: "${SMTP_USER}"
  smtp_pass: "${SMTP_PASSWORD}"
  require_transport_security: true
  notif_from: "%(app)s matrix homeserver <noreply@${DOMAIN_NAME}>"
  app_name: ${DOMAIN_NAME}
EOF
fi
# re-register the OIDC client with the fresh secret; the delete may
# fail harmlessly on the very first run
../keycloak/client-delete 'synapse' 2>/dev/null
../keycloak/client-create << EOF || die "unable to create client id"
{
	"clientId": "synapse",
	"rootUrl": "https://$MATRIX_HOSTNAME/",
	"adminUrl": "https://$MATRIX_HOSTNAME/",
	"redirectUris": [ "https://$MATRIX_HOSTNAME/*" ],
	"webOrigins": [ "https://$MATRIX_HOSTNAME" ],
	"clientAuthenticatorType": "client-secret",
	"secret": "$MATRIX_CLIENT_SECRET"
}
EOF
docker compose up -d || die "matrix: unable to start container"

@ -0,0 +1,108 @@
# Mobilizon instance configuration
import Config

listen_ip = System.get_env("MOBILIZON_INSTANCE_LISTEN_IP", "0.0.0.0")

listen_ip =
  case listen_ip |> to_charlist() |> :inet.parse_address() do
    {:ok, listen_ip} -> listen_ip
    _ -> raise "MOBILIZON_INSTANCE_LISTEN_IP does not match the expected IP format."
  end

config :mobilizon, Mobilizon.Web.Endpoint,
  server: true,
  url: [host: System.get_env("MOBILIZON_INSTANCE_HOST", "mobilizon.lan")],
  http: [
    port: String.to_integer(System.get_env("MOBILIZON_INSTANCE_PORT", "4000")),
    ip: listen_ip
  ],
  secret_key_base: System.get_env("MOBILIZON_INSTANCE_SECRET_KEY_BASE", "changethis")

config :mobilizon, Mobilizon.Web.Auth.Guardian,
  secret_key: System.get_env("MOBILIZON_INSTANCE_SECRET_KEY", "changethis")

config :mobilizon, :instance,
  name: System.get_env("MOBILIZON_INSTANCE_NAME", "Mobilizon"),
  description: "Change this to a proper description of your instance",
  hostname: System.get_env("MOBILIZON_INSTANCE_HOST", "mobilizon.lan"),
  registrations_open: System.get_env("MOBILIZON_INSTANCE_REGISTRATIONS_OPEN", "false") == "true",
  demo: false,
  allow_relay: true,
  federating: true,
  email_from: System.get_env("MOBILIZON_INSTANCE_EMAIL", "woodbine@celehner.com"),
  email_reply_to: System.get_env("MOBILIZON_REPLY_EMAIL", "woodbine.events@celehner.com")

config :mobilizon, Mobilizon.Storage.Repo,
  adapter: Ecto.Adapters.Postgres,
  username: System.get_env("MOBILIZON_DATABASE_USERNAME", "username"),
  password: System.get_env("MOBILIZON_DATABASE_PASSWORD", "password"),
  database: System.get_env("MOBILIZON_DATABASE_DBNAME", "mobilizon"),
  hostname: System.get_env("MOBILIZON_DATABASE_HOST", "postgres"),
  port: 5432,
  pool_size: 10

config :mobilizon, Mobilizon.Web.Email.Mailer,
  adapter: Swoosh.Adapters.SMTP,
  relay: System.get_env("MOBILIZON_SMTP_SERVER", "localhost"),
  # NOTE(review): passed through as a string, unlike the endpoint port
  # above which uses String.to_integer — confirm the adapter accepts it.
  port: System.get_env("MOBILIZON_SMTP_PORT", "25"),
  username: System.get_env("MOBILIZON_SMTP_USERNAME", nil),
  password: System.get_env("MOBILIZON_SMTP_PASSWORD", nil),
  tls: :if_available,
  allowed_tls_versions: [:tlsv1, :"tlsv1.1", :"tlsv1.2"],
  # compare against "true": System.get_env returns a string, and any
  # non-nil string — including "false" — is truthy in Elixir, so the
  # previous bare value silently enabled SSL.
  ssl: System.get_env("MOBILIZON_SMTP_SSL", "false") == "true",
  retries: 2,
  no_mx_lookups: false,
  auth: :always

config :geolix,
  databases: [
    %{
      id: :city,
      adapter: Geolix.Adapter.MMDB2,
      source: "/var/lib/mobilizon/geo_db/GeoLite2-City.mmdb"
    }
  ]

config :mobilizon, Mobilizon.Web.Upload.Uploader.Local,
  uploads: System.get_env("MOBILIZON_UPLOADS", "/var/lib/mobilizon/uploads")

config :mobilizon, :exports,
  path: System.get_env("MOBILIZON_UPLOADS_EXPORTS", "/var/lib/mobilizon/uploads/exports"),
  formats: [
    Mobilizon.Service.Export.Participants.CSV,
    Mobilizon.Service.Export.Participants.PDF,
    Mobilizon.Service.Export.Participants.ODS
  ]

config :tz_world,
  data_dir: System.get_env("MOBILIZON_TIMEZONES_DIR", "/var/lib/mobilizon/timezones")

#
# keycloak config for hackerspace.zone self hosted single-sign-on
#
keycloak_hostname = System.get_env("KEYCLOAK_HOSTNAME", "keycloak.example.com")
keycloak_realm = System.get_env("REALM", "example")
keycloak_secret = System.get_env("MOBILIZON_CLIENT_SECRET", "abcdef1234")

keycloak_url = "https://#{keycloak_hostname}/realms/#{keycloak_realm}"

config :ueberauth,
  Ueberauth,
  providers: [
    keycloak: {Ueberauth.Strategy.Keycloak, [default_scope: "openid"]}
  ]

config :mobilizon, :auth,
  oauth_consumer_strategies: [
    {:keycloak, "#{keycloak_hostname}"}
  ]

config :ueberauth, Ueberauth.Strategy.Keycloak.OAuth,
  client_id: "mobilizon",
  client_secret: keycloak_secret,
  site: keycloak_url,
  authorize_url: "#{keycloak_url}/protocol/openid-connect/auth",
  token_url: "#{keycloak_url}/protocol/openid-connect/token",
  userinfo_url: "#{keycloak_url}/protocol/openid-connect/userinfo",
  token_method: :post

@ -0,0 +1,25 @@
services:
  mobilizon:
    image: kaihuri/mobilizon:5.1.4
    mem_limit: 512m
    restart: always
    env_file:
      - ../env.production
      - ./env.production
      - ../data/mobilizon/env.secrets
    volumes:
      - ../data/mobilizon/uploads:/var/lib/mobilizon/uploads
      - ./config.exs:/etc/mobilizon/config.exs:ro
      # - ${PWD}/GeoLite2-City.mmdb:/var/lib/mobilizon/geo_db/GeoLite2-City.mmdb
    ports:
      - "7000:7000"
    # Start the database container first so mobilizon does not crash-loop
    # while postgres initializes (matches the nextcloud compose file).
    depends_on:
      - db

  db:
    image: postgis/postgis:13-3.4
    restart: always
    volumes:
      - ../data/mobilizon/db:/var/lib/postgresql/data
    environment:
      - POSTGRES_USER=mobilizon
      - POSTGRES_PASSWORD=mobilizon
      - POSTGRES_DB=mobilizon

@ -0,0 +1,24 @@
# Database settings
# NOTE(review): the db container in docker-compose.yml hard-codes
# POSTGRES_PASSWORD=mobilizon, while POSTGRES_PASSWORD here is "changethis";
# MOBILIZON_DATABASE_PASSWORD is what the app actually uses — confirm the
# POSTGRES_* entries in this file are consumed at all.
POSTGRES_USER=mobilizon
POSTGRES_PASSWORD=changethis
POSTGRES_DB=mobilizon
MOBILIZON_DATABASE_USERNAME=mobilizon
MOBILIZON_DATABASE_PASSWORD=mobilizon
MOBILIZON_DATABASE_DBNAME=mobilizon
# "db" is the compose service name of the postgres container.
MOBILIZON_DATABASE_HOST=db
# Instance configuration
MOBILIZON_INSTANCE_REGISTRATIONS_OPEN=false
MOBILIZON_INSTANCE_PORT=7000
MOBILIZON_INSTANCE_EMAIL=noreply@mobilizon.lan
MOBILIZON_REPLY_EMAIL=contact@mobilizon.lan
# Email settings
# These defaults are overridden by ../data/mobilizon/env.secrets when the
# setup script finds an ../env.smtp configuration.
MOBILIZON_SMTP_SERVER=localhost
MOBILIZON_SMTP_PORT=25
MOBILIZON_SMTP_HOSTNAME=localhost
MOBILIZON_SMTP_USERNAME=noreply@mobilizon.lan
MOBILIZON_SMTP_PASSWORD=password
MOBILIZON_SMTP_SSL=false

@ -0,0 +1,62 @@
#!/bin/bash
# First-run bootstrap for mobilizon: generate secrets, register the OIDC
# client with keycloak, then start the docker compose stack. Subsequent runs
# (secrets file already present) just start the stack.
die() { echo >&2 "mobilizon: $@" ; exit 1 ; }

# Quote the path expansions so a checkout under a directory with spaces works.
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"

source ../env.production || die "no top level env?"
source env.production || die "no local env?"
# SMTP configuration is optional; ignore a missing file.
source ../env.smtp 2>/dev/null

DATA="../data/mobilizon"
SECRETS="$DATA/env.secrets"

# Already provisioned: normal restart path.
if [ -r "$SECRETS" ]; then
docker compose up -d || die "unable to start"
exit 0
fi

docker compose down 2>/dev/null

CLIENT_SECRET="$(openssl rand -hex 20)"

mkdir -p "$DATA/uploads"
# The container's unprivileged user must be able to write uploads.
# NOTE(review): 777 is very broad; chown to the container uid would be tighter.
chmod 777 "$DATA/uploads"

mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# DO NOT CHECK IN
MOBILIZON_INSTANCE_NAME=${DOMAIN_NAME}
MOBILIZON_INSTANCE_HOST=${MOBILIZON_HOSTNAME}
MOBILIZON_INSTANCE_SECRET_KEY_BASE=$(openssl rand -hex 20)
MOBILIZON_INSTANCE_SECRET_KEY=$(openssl rand -hex 20)
MOBILIZON_CLIENT_SECRET=${CLIENT_SECRET}
EOF

# Only write SMTP overrides when ../env.smtp provided a server.
if [ -n "$SMTP_SERVER" ]; then
cat <<EOF >> "$SECRETS"
MOBILIZON_INSTANCE_EMAIL=events@${DOMAIN_NAME}
MOBILIZON_REPLY_EMAIL=noreply@${DOMAIN_NAME}
MOBILIZON_SMTP_SERVER=${SMTP_SERVER}
MOBILIZON_SMTP_PORT=${SMTP_PORT}
MOBILIZON_SMTP_USERNAME=${SMTP_USER}
MOBILIZON_SMTP_PASSWORD=${SMTP_PASSWORD}
EOF
fi

# Recreate the keycloak client so its secret matches the one just generated.
../keycloak/client-delete mobilizon
../keycloak/client-create <<EOF || die "unable to create client"
{
"clientId": "mobilizon",
"rootUrl": "https://$MOBILIZON_HOSTNAME",
"adminUrl": "https://$MOBILIZON_HOSTNAME",
"redirectUris": [ "https://$MOBILIZON_HOSTNAME/*" ],
"webOrigins": [ "https://$MOBILIZON_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$CLIENT_SECRET"
}
EOF

docker compose up -d || die "unable to start container"

@ -0,0 +1,36 @@
services:
  # Postgres backing store for nextcloud.
  database:
    image: postgres:16-alpine
    restart: always
    environment:
      - POSTGRES_USER=nextcloud
      - POSTGRES_PASSWORD=nextcloud
      - POSTGRES_DB=nextcloud
    volumes:
      - ../data/nextcloud/database:/var/lib/postgresql/data

  nextcloud:
    image: nextcloud:28-apache
    restart: always
    ports:
      # Bound on the host at 9000; the nginx vhost proxies to this port.
      - "9000:80"
    env_file:
      - ../env.production
      - env.production
      - ../data/nextcloud/env.secrets
    environment:
      # Must match the credentials of the "database" service above.
      POSTGRES_HOST: database
      POSTGRES_DB: nextcloud
      POSTGRES_USER: nextcloud
      POSTGRES_PASSWORD: nextcloud
      # TLS is terminated by the nginx reverse proxy in front of this.
      OVERWRITEPROTOCOL: https
      NEXTCLOUD_ADMIN_USER: admin
      # NEXTCLOUD_ADMIN_PASSWORD in env.secrets
      # NEXTCLOUD_TRUSTED_DOMAINS also set in env.secrets
    volumes:
      - ../data/nextcloud/nextcloud:/var/www/html
    depends_on:
      - database
    labels:
      # Enable diun image-update notifications for this container.
      - "diun.enable=true"

@ -0,0 +1 @@
# non-secret nextcloud config

@ -0,0 +1,82 @@
#!/bin/bash
# First-run bootstrap for nextcloud: generate admin + OIDC secrets, bring up
# the containers, configure the sociallogin app via occ, and register the
# "nextcloud" client in keycloak. Re-runs just start the stack.
die() { echo >&2 "$@" ; exit 1 ; }
DIRNAME="$(dirname $0)"
cd "$DIRNAME"
source ../env.production || die "no top level env?"
source env.production || die "no local env?"
SECRETS="../data/nextcloud/env.secrets"
# Already provisioned: normal restart path.
if [ -r "$SECRETS" ]; then
docker compose up -d || die "nextcloud: unable to start"
exit 0
fi
docker compose down 2>/dev/null
NEXTCLOUD_CLIENT_SECRET="$(openssl rand -hex 32)"
NEXTCLOUD_ADMIN_PASSWORD="$(openssl rand -hex 6)"
# The generated admin password is printed once here; it is also persisted
# into env.secrets below.
echo "Generating secrets: admin password $NEXTCLOUD_ADMIN_PASSWORD"
mkdir -p "$(dirname "$SECRETS")"
cat <<EOF > "$SECRETS"
# Do not check in!
NEXTCLOUD_ADMIN_PASSWORD=$NEXTCLOUD_ADMIN_PASSWORD
NEXTCLOUD_TRUSTED_DOMAINS=$NEXTCLOUD_HOSTNAME
NEXTCLOUD_CLIENT_SECRET=$NEXTCLOUD_CLIENT_SECRET
EOF
# OpenID Connect endpoints of the keycloak realm.
BASE="https://$KEYCLOAK_HOSTNAME/realms/$REALM/protocol/openid-connect"
# sociallogin "custom_oidc" provider definition, compacted to one line by jq
# so it can be embedded in the occ command below.
PROVIDER="$(jq -c . <<EOF
{
"custom_oidc": [
{
"name": "keycloak",
"title": "Keycloak",
"clientId": "nextcloud",
"clientSecret": "$NEXTCLOUD_CLIENT_SECRET",
"authorizeUrl": "$BASE/auth",
"tokenUrl": "$BASE/token",
"userInfoUrl": "$BASE/userinfo",
"logoutUrl": "$BASE/logout",
"scope": "openid",
"groupsClaim": "roles",
"style": "keycloak",
"displayNameClaim": "",
"defaultGroup": ""
}
]
}
EOF
)"
docker compose up -d || die "unable to bring up docker"
# wait for the nextcloud instance to be responsive
# TODO: how to find out if it is ready?
# NOTE(review): a fixed sleep is fragile; polling "./occ status" until the
# install completes would be more robust.
echo "Sleeping a minute while nextcloud installs"
sleep 60
# NOTE(review): $PROVIDER is expanded inside single quotes in the heredoc
# below — a single quote inside the JSON would break the occ command; the
# current values cannot contain one, but confirm if the provider changes.
docker compose exec -u www-data -T nextcloud bash -x <<EOF || die "unable to configure sociallogin"
./occ app:install calendar
./occ app:install sociallogin
./occ config:app:set sociallogin prevent_create_email_exists --value=1 || exit 1
./occ config:app:set sociallogin update_profile_on_login --value=1 || exit 1
./occ config:app:set sociallogin custom_providers --value='$PROVIDER' || exit 1
EOF
# Recreate the keycloak client so its secret matches the one just generated.
../keycloak/client-delete 'nextcloud' || echo "client did not exist?"
../keycloak/client-create << EOF || die "unable to create client id"
{
"clientId": "nextcloud",
"rootUrl": "https://$NEXTCLOUD_HOSTNAME/",
"adminUrl": "https://$NEXTCLOUD_HOSTNAME/",
"redirectUris": [ "https://$NEXTCLOUD_HOSTNAME/*" ],
"webOrigins": [ "https://$NEXTCLOUD_HOSTNAME" ],
"clientAuthenticatorType": "client-secret",
"secret": "$NEXTCLOUD_CLIENT_SECRET"
}
EOF

@ -0,0 +1,44 @@
#!/bin/bash
# Obtain/renew the LetsEncrypt certificate covering every service hostname,
# then point nginx's "live" symlinks at the freshly issued key material and
# reload nginx.
die() { echo >&2 "$@" ; exit 1 ; }

DIRNAME="$(dirname $(realpath "$0"))"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"

source ../env.production
source ./env.production

# One certificate with a SAN entry per service hostname.
domain_args="-d $DOMAIN_NAME,$KEYCLOAK_HOSTNAME,$HEDGEDOC_HOSTNAME,$MASTODON_HOSTNAME,$NEXTCLOUD_HOSTNAME,$MATRIX_HOSTNAME,$GITEA_HOSTNAME,$MOBILIZON_HOSTNAME,$BOOKSTACK_HOSTNAME,$ZULIP_HOSTNAME"
rsa_key_size=2048

set -x

# move the temp live directory away if
# this is the first time we've run anything here
# (no accounts dir == only the self-signed bootstrap key exists)
if [ ! -d "../data/certbot/conf/accounts" ]; then
echo "deleting temp keys"
rm -rf ../data/certbot/conf/live
fi

# try to work around https://git.woodbine.nyc/cel/hackerspace-zone-mirror/issues/12
# by archiving any previous lineage so certbot creates a fresh one.
mkdir -p ../data/certbot/conf/archive/old
mv ../data/certbot/conf/archive/"${DOMAIN_NAME}"* ../data/certbot/conf/archive/old/

docker compose run --rm certbot \
certonly \
--webroot \
--webroot-path /var/www/certbot \
--email "admin@$DOMAIN_NAME" \
--rsa-key-size "$rsa_key_size" \
--agree-tos \
--no-eff-email \
--force-renewal \
$domain_args \
|| die "unable to renew!"

# Newest archive folder is the lineage certbot just created.
latest_folder=$(ls -t ../data/certbot/conf/archive/ | head -n1)
creds=../data/certbot/conf/archive/${latest_folder}
# Use the configured domain rather than a hard-coded hostname so this
# script works for any deployment of the repo (the nginx configs read
# /etc/letsencrypt/live/${DOMAIN_NAME}/...).
live=../data/certbot/conf/live/${DOMAIN_NAME}
# The live dir may have been removed by the first-run cleanup above.
mkdir -p "${live}"
ln -frs "${creds}/fullchain1.pem" "${live}/fullchain.pem"
ln -frs "${creds}/privkey1.pem" "${live}/privkey.pem"

docker compose exec nginx nginx -s reload

@ -0,0 +1,46 @@
services:
  # Front-door reverse proxy; terminates TLS for every service.
  nginx:
    image: nginx:1.25-alpine
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      # Vhost templates are rendered to conf.d by the nginx image's
      # envsubst entrypoint.
      - ./nginx/templates:/etc/nginx/templates:ro
      - ./nginx/includes:/etc/nginx/includes:ro
      - ../html:/var/www/html:ro
      - /var/www/2022:/var/www/2022:ro
      - /var/www/2023:/var/www/2023:ro
      - /var/www/2024:/var/www/2024:ro
      - ../data/certbot/www:/var/www/certbot:ro
      - ../data/certbot/conf:/etc/letsencrypt:ro
      - ../data/nginx/cache:/data/nginx/cache:rw
      # Needed for the ~user public_html locations.
      - /home:/home:ro
    env_file:
      - ../env.production
      - env.production
    extra_hosts:
      # Lets proxy_pass reach services published on the host's ports.
      - "host.docker.internal:host-gateway"

  # Issues/renews the LetsEncrypt certificates used by nginx above.
  certbot:
    image: certbot/certbot
    volumes:
      - ../data/certbot/conf:/etc/letsencrypt
      - ../data/certbot/www:/var/www/certbot

  # Proof-of-work bot filter sitting in front of nginx-proxied sites.
  anubis-nginx:
    image: ghcr.io/techarohq/anubis:latest
    environment:
      # NOTE(review): BIND is a unix socket and the nginx upstream config
      # proxies to unix:/run/anubis/nginx.sock, but no volume is shared
      # between this container and nginx here — confirm how the socket is
      # exposed.
      BIND: "/run/anubis/nginx.sock"
      DIFFICULTY: "4"
      METRICS_BIND: ":9090"
      SERVE_ROBOTS_TXT: "true"
      TARGET: "http://nginx"
      POLICY_FNAME: "/data/cfg/botPolicy.yaml"
      OG_PASSTHROUGH: "true"
      OG_EXPIRY_TIME: "24h"
    ports:
      - 8080:8080
    # volumes:
    #   - "./botPolicy.yaml:/data/cfg/botPolicy.yaml:ro"

@ -0,0 +1,3 @@
# Serve LetsEncrypt HTTP-01 challenge files written by the certbot container
# (the ../data/certbot/www volume is mounted read-only at /var/www/certbot).
location /.well-known/acme-challenge/ {
	root /var/www/certbot;
}

@ -0,0 +1,14 @@
# This file contains important security parameters. If you modify this file
# manually, Certbot will be unable to automatically provide future security
# updates. Instead, Certbot will print and log an error message with a path to
# the up-to-date file that you will need to refer to when manually updating
# this file.
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA";

@ -0,0 +1,8 @@
-----BEGIN DH PARAMETERS-----
MIIBCAKCAQEA//////////+t+FRYortKmq/cViAnPTzx2LnFg84tNpWp4TZBFGQz
+8yTnc4kmz75fS/jY2MMddj2gbICrsRhetPfHtXV/WVhJDP1H18GbtCFY2VVPe0a
87VXE15/V8k1mE8McODmi3fipona8+/och3xWKE2rec1MKzKT0g6eXq8CrGCsyT7
YdEIqUuyyOP7uWrat2DX9GgdT0Kj3jlN9K5W7edjcrsZCwenyO4KbXCeAvzhzffi
7MA0BM0oNC9hkXL+nOmFg/+OTxIy7vKBg8P+OxtMb61zO7X8vC7CIAXFjvGDfRaD
ssbzSibBsu/6iGtCOGEoXJf//////////wIBAg==
-----END DH PARAMETERS-----

@ -0,0 +1,95 @@
#user www-data;
worker_processes auto;
pid /run/nginx.pid;
include /etc/nginx/modules-enabled/*.conf;

# Expose DOMAIN_NAME to nginx (used by the templated vhost configs).
env DOMAIN_NAME;

events {
	worker_connections 768;
	# multi_accept on;
}

http {
	##
	# Basic Settings
	##

	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	# server_tokens off;

	server_names_hash_bucket_size 64;
	# server_name_in_redirect off;

	include /etc/nginx/mime.types;
	default_type application/octet-stream;

	##
	# SSL Settings
	##

	# TLSv1 and TLSv1.1 are deprecated (RFC 8996) and were already excluded
	# by the per-server includes/options-ssl-nginx.conf; keep the http-level
	# default consistent with that.
	ssl_protocols TLSv1.2 TLSv1.3;
	ssl_prefer_server_ciphers on;

	##
	# Logging Settings
	##

	#access_log syslog:server=localhost;
	#error_log syslog:server=localhost;

	##
	# Gzip Settings
	##

	gzip on;

	# gzip_vary on;
	# gzip_proxied any;
	# gzip_comp_level 6;
	# gzip_buffers 16 8k;
	# gzip_http_version 1.1;
	# gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript;

	# Shared proxy cache used by the hedgedoc-backed locations.
	proxy_cache_path
		/data/nginx/cache
		keys_zone=mycache:10m
		loader_threshold=300
		loader_files=200
		max_size=200m;

	##
	# Virtual Host Configs
	##

	include /etc/nginx/conf.d/*.conf;
	include /etc/nginx/sites-enabled/*;
	include /tmp/sites-enabled/*;
}

#mail {
#	# See sample authentication script at:
#	# http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript
#
#	# auth_http localhost/auth.php;
#	# pop3_capabilities "TOP" "USER";
#	# imap_capabilities "IMAP4rev1" "UIDPLUS";
#
#	server {
#		listen localhost:110;
#		protocol pop3;
#		proxy on;
#	}
#
#	server {
#		listen localhost:143;
#		protocol imap;
#		proxy on;
#	}
#}

@ -0,0 +1,125 @@
# Redirect *all* port 80 traffic to the same thing on port 443
server {
	listen 80 default_server;
	location / {
		# Send known AI/SEO scraper user agents a 10GB download instead of
		# redirecting them to the real content.
		if ($http_user_agent ~* "(AdsBot-Google|Amazonbot|anthropic-ai|Applebot|Applebot-Extended|AwarioRssBot|AwarioSmartBot|Bytespider|CCBot|ChatGPT-User|ClaudeBot|Claude-Web|cohere-ai|DataForSeoBot|Diffbot|FacebookBot|FriendlyCrawler|Google-Extended|GoogleOther|GPTBot|img2dataset|ImagesiftBot|magpie-crawler|Meltwater|omgili|omgilibot|peer39_crawler|peer39_crawler/1.0|PerplexityBot|PiplBot|scoop.it|Seekr|YouBot)"){
			return 307 https://ash-speed.hetzner.com/10GB.bin;
		}
		return 301 https://$host$request_uri;
	}
}
# Default HTTPS server for ${DOMAIN_NAME}: bot filtering, well-known
# delegation for matrix/mastodon, static sites, and the hedgedoc-backed
# home pages.
server {
	#server_name ${DOMAIN_NAME} default;
	client_max_body_size 128m;
	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	#include /etc/nginx/mime.types;
	#default_type application/octet-stream;
	#
	# Send known AI/SEO scraper user agents a 10GB download instead of content.
	if ($http_user_agent ~* "(AdsBot-Google|Amazonbot|anthropic-ai|Applebot|Applebot-Extended|AwarioRssBot|AwarioSmartBot|Bytespider|CCBot|ChatGPT-User|ClaudeBot|Claude-Web|cohere-ai|DataForSeoBot|Diffbot|FacebookBot|FriendlyCrawler|Google-Extended|GoogleOther|GPTBot|img2dataset|ImagesiftBot|magpie-crawler|Meltwater|omgili|omgilibot|peer39_crawler|peer39_crawler/1.0|PerplexityBot|PiplBot|scoop.it|Seekr|YouBot)"){
		return 307 https://ash-speed.hetzner.com/10GB.bin;
	}
	gzip on;
	gzip_disable "msie6";
	proxy_read_timeout 1800s;
	# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
	chunked_transfer_encoding on;
	# Forward to anubis
	# NOTE(review): "location /" is declared twice in this server block (here
	# and again below for the hedgedoc proxy). nginx refuses to load a config
	# with duplicate locations — confirm which of the two should be active.
	location / {
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_pass http://anubis;
	}
	# Discourage all crawlers site-wide.
	location = /robots.txt {
		add_header Content-Type text/plain;
		return 200 "User-agent: *\nDisallow: /\n";
	}
	# delegated Matrix server
	location /.well-known/matrix {
		proxy_pass https://${MATRIX_HOSTNAME};
	}
	location /.well-known/webfinger {
		proxy_pass https://${MASTODON_HOSTNAME};
	}
	# separate Mastodon WEB_DOMAIN and LOCAL_DOMAIN
	location = /.well-known/host-meta {
		return 302 https://${MASTODON_HOSTNAME}$request_uri;
	}
	# tilde club home directories
	location ~ ^/~(.+?)(/.*)?$ {
		alias /home/$1/public_html$2;
		index index.html index.htm;
		autoindex on;
	}
	# /html files are served from the static html site
	location /html {
		root /var/www;
		autoindex off;
	}
	# /YEAR static files
	location /2022 { root /var/www; autoindex off; }
	location /2023 { root /var/www; autoindex off; }
	location /2024 { root /var/www; autoindex off; }
	# default home page goes to hedgedoc document "Main_Page"; please add your own content!
	location = / {
		#return 302 https://${DOMAIN_NAME}/Main_Page;
		return 302 https://www.${DOMAIN_NAME};
	}
	# redirect to squarespace pages on www
	location ~ ^/(about|events|mutualaid|farmshare|writings|podcasts|videos|press)$ {
		return 302 https://www.${DOMAIN_NAME}/$1;
	}
	# rewrite /s/ links to the bare link
	location ~ ^/s/(.*) {
		return 302 https://${DOMAIN_NAME}/$1;
	}
	# normal pages go to hedgedoc static site (need to define ports in the env)
	# need to rewrite the CSP so that it allows reframing from the main site
	location / {
		proxy_cache mycache;
		add_header X-Cache-Status $upstream_cache_status;
		proxy_ignore_headers Cache-Control;
		proxy_hide_header Content-Security-Policy;
		add_header Content-Security-Policy "script-src 'self' 'unsafe-inline' 'unsafe-eval' *.${DOMAIN_NAME}; frame-src 'self' *.${DOMAIN_NAME}; object-src 'self'; base-uri 'self' *.${DOMAIN_NAME}";
		proxy_pass http://host.docker.internal:3000/s$request_uri;
		proxy_cache_valid any 1m;
	}
	# while javascript and config stuff goes to non-static hedgedoc site
	location ~ ^/(js|build|config$) {
		proxy_cache mycache;
		add_header X-Cache-Status $upstream_cache_status;
		proxy_ignore_headers Cache-Control;
		proxy_cache_valid any 1m;
		proxy_pass http://host.docker.internal:3000$request_uri;
	}
	listen 443 ssl default_server;
	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	include /etc/nginx/includes/options-ssl-nginx.conf;
	include /etc/nginx/includes/challenge.conf;
	ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,76 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name ${MATRIX_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
#default_type application/octet-stream;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://host.docker.internal:5000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^(/_matrix|/_synapse/client) {
# note: do not add a path (even a single /) after the port in `proxy_pass`,
# otherwise nginx will canonicalise the URI and cause signature verification
# errors.
proxy_pass http://host.docker.internal:5008;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
# Nginx by default only allows file uploads up to 1M in size
# Increase client_max_body_size to match max_upload_size defined in homeserver.yaml
client_max_body_size 50M;
}
# serve the static content for the well known files
location /.well-known/matrix/server {
default_type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '{"m.server": "${MATRIX_HOSTNAME}:443"}';
}
location /.well-known/matrix/client {
default_type application/json;
add_header Access-Control-Allow-Origin *;
return 200 '{"m.homeserver":{"base_url": "https://${MATRIX_HOSTNAME}"}}';
}
# The federation port is not enabled; go through 443
#listen 8448 ssl http2 default_server;
#listen [::]:8448 ssl http2 default_server;
# For the user connection
listen 443 ssl;
http2 on;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,46 @@
server {
server_name ${NEXTCLOUD_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
#default_type application/octet-stream;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location /.well-known/carddav {
return 301 $scheme://$host/remote.php/dav;
}
location /.well-known/caldav {
return 301 $scheme://$host/remote.php/dav;
}
location / {
proxy_pass http://host.docker.internal:9000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,52 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name ${GRAFANA_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
#include /etc/nginx/mime.types;
#default_type application/octet-stream;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://host.docker.internal:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
location /socket.io/ {
proxy_pass http://host.docker.internal:8000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,52 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name ${HEDGEDOC_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
proxy_cache mycache;
proxy_cache_valid any 1m;
add_header X-Cache-Status $upstream_cache_status;
# required to avoid HTTP 411: see Issue #1486 (https://github.com/dotcloud/docker/issues/1486)
chunked_transfer_encoding on;
location / {
proxy_pass http://host.docker.internal:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# allow the fonts to be used by anything
location ~* \.(eot|otf|ttf|woff|woff2)$ {
add_header Access-Control-Allow-Origin *;
proxy_pass http://host.docker.internal:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,38 @@
server {
server_name ${MOBILIZON_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
location / {
proxy_pass http://host.docker.internal:7000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
# force login with OIDC
location /login {
return 302 https://${MOBILIZON_HOSTNAME}/auth/keycloak;
}
# temporary redirect for DWebNY; may be removed after 2023-01-31
location /@dwebny { return 302 https://events.woodbine.nyc/@dwebny@mobilizon.us; }
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,40 @@
# gitea vhost: served only via a unix socket (the direct TLS listener is
# intentionally commented out below).
server {
	# NOTE(review): this listens on /run/nginx/nginx.sock while the anubis
	# upstream's commented fallback references /run/nginx.sock — confirm the
	# intended socket path.
	listen unix:/run/nginx/nginx.sock;
	server_name ${GITEA_HOSTNAME};
	client_max_body_size 128m;
	sendfile on;
	tcp_nopush on;
	tcp_nodelay on;
	keepalive_timeout 65;
	types_hash_max_size 2048;
	gzip on;
	gzip_disable "msie6";
	proxy_read_timeout 1800s;
	location / {
		proxy_pass http://host.docker.internal:3030;
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
	}
	# force login with OIDC
	location /user/login {
		return 302 https://${GITEA_HOSTNAME}/user/oauth2/keycloak;
	}
	# listen 443 ssl;
	# ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	# ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	# include /etc/nginx/includes/options-ssl-nginx.conf;
	# include /etc/nginx/includes/challenge.conf;
	# ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,28 @@
server {
listen 80;
listen [::]:80;
location / {
return 301 https://$host$request_uri;
}
}
server {
listen 443 ssl;
listen [::]:443 ssl;
http2 on;
server_name ${ZULIP_HOSTNAME};
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
location / {
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_buffering off;
proxy_read_timeout 20m;
proxy_pass https://host.docker.internal:8774;
}
}

@ -0,0 +1,25 @@
server {
server_name login.${DOMAIN_NAME};
client_max_body_size 128m;
location / {
proxy_pass http://host.docker.internal:8080;
proxy_pass_header Set-Cookie;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto https;
}
# location /.well-known/openid-configuration {
# return 301 $scheme://$host/.well-known/openid-configuration;
# }
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,30 @@
server {
server_name ${PIXELFED_HOSTNAME};
client_max_body_size 128m;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
keepalive_timeout 65;
types_hash_max_size 2048;
gzip on;
gzip_disable "msie6";
proxy_read_timeout 1800s;
location / {
proxy_pass http://host.docker.internal:8090;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,41 @@
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
server {
server_name social.${DOMAIN_NAME};
client_max_body_size 128m;
location / {
proxy_pass http://host.docker.internal:6001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Forwarded-Proto https;
}
location /api/v1/streaming {
proxy_pass http://host.docker.internal:4000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_buffering off;
proxy_redirect off;
proxy_http_version 1.1;
tcp_nodelay on;
}
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
include /etc/nginx/includes/options-ssl-nginx.conf;
include /etc/nginx/includes/challenge.conf;
ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,14 @@
upstream anubis {
# Make sure this matches the values you set for `BIND` and `BIND_NETWORK`.
# If this does not match, your services will not be protected by Anubis.
# Try anubis first over a UNIX socket
server unix:/run/anubis/nginx.sock;
#server 127.0.0.1:8923;
# Optional: fall back to serving the websites directly. This allows your
# websites to be resilient against Anubis failing, at the risk of exposing
# them to the raw internet without protection. This is a tradeoff and can
# be worth it in some edge cases.
#server unix:/run/nginx.sock backup;
}

@ -0,0 +1,22 @@
# bookstack vhost: TLS termination plus a plain reverse proxy to the
# bookstack container published on host port 6875.
server {
	server_name ${BOOKSTACK_HOSTNAME};
	# 0 disables the body-size limit (large page attachments/uploads).
	client_max_body_size 0;
	location / {
		proxy_pass http://host.docker.internal:6875;
		proxy_set_header Host $host;
		proxy_set_header X-Real-IP $remote_addr;
		proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
		proxy_set_header X-Forwarded-Proto $scheme;
	}
	listen 443 ssl;
	listen [::]:443 ssl;
	http2 on;
	ssl_certificate /etc/letsencrypt/live/${DOMAIN_NAME}/fullchain.pem;
	ssl_certificate_key /etc/letsencrypt/live/${DOMAIN_NAME}/privkey.pem;
	include /etc/nginx/includes/options-ssl-nginx.conf;
	include /etc/nginx/includes/challenge.conf;
	ssl_dhparam /etc/nginx/includes/ssl-dhparams.pem;
}

@ -0,0 +1,39 @@
#!/bin/bash
# First-run bootstrap for nginx: create a self-signed placeholder certificate
# so nginx can start, bring up the stack, then request real certificates via
# certbot-renew. Re-runs (key already present) just start the stack.
die() { echo >&2 "$@" ; exit 1 ; }

# Quote the expansion so a checkout path with spaces works.
DIRNAME="$(dirname "$0")"
cd "$DIRNAME" || die "unable to cd to $DIRNAME"

source ../env.production || die "no top level env"
source env.production || die "no local env"

if [ -z "${DOMAIN_NAME}" ]; then
die "DOMAIN_NAME not set"
fi

certdir="../data/certbot/conf/live/${DOMAIN_NAME}"

# A private key already exists: normal restart path.
if [ -r "$certdir/privkey.pem" ]; then
docker compose up -d || die "nginx: unable to start"
exit 0
fi

mkdir -p "$certdir" || die "$certdir: unable to make"

# Temporary self-signed certificate so nginx's ssl listeners can start
# before real certificates exist. (Fixed: a stray apostrophe used to be
# embedded in the subject CN.)
openssl req \
-x509 \
-newkey rsa:2048 \
-keyout "$certdir/privkey.pem" \
-out "$certdir/fullchain.pem" \
-sha256 \
-nodes \
-days 365 \
-subj "/CN=${DOMAIN_NAME}" \
|| die "$certdir/privkey.pem: unable to create temp key"

docker compose up -d || die "unable to bring up nginx"
echo "SLEEPING..."
sleep 10
./certbot-renew || die "unable to create certs"

@ -1,153 +0,0 @@
# community services for woodbine.nyc
Experiment in digital autonomy
Latest code is hosted on https://git.woodbine.nyc/micro/woodbine.nyc
If you are new to running your own websites, welcome!
Note that a "service" is a fuzzy name for software that is expected to be always running.
A simple web server (`python3 -m http.server`) could be a service, as could something like Gmail.
## Goals
Understandable
- a person should be able to adapt this to their community while learning the least amount of new concepts and technology
- the person who set it up should not be needed to maintain the services
Resilient
- services should work even when other parts of the web are not accessible
Lean
- we prefer lightweight software, which usually require less long-term maintenance
## Decisions
There are many other kinds of digital autonomy, but most people are used to the web.
We hope to share our decision making here, so you can follow our thought process.
### Decisions made for you
These needs are required for anyone who wants to deploy **web-based** services.
#### Auth
We need a way for people to either register an account or sign in with an external account to use the services.
After trying authelia, zitadel, authentik, and keycloak, got the furthest with zitadel.
#### Web
To host a webpage, you need some software that listens for http requests. We chose Caddy.
If you would like to edit the webpage, either change the files in `./data/web/site/` directly, or you can connect via WebDAV and edit the file remotely via https://web.localhost.
#### Backup
If you will be helping a community, its important to have backups and restore. We have two helper services, `backup-files` and `backup-database`.
These use duplicity to backup to a backblaze instance, so you will need to setup that beforehand.
#### Secrets
We have two helper services for making sure secrets exist (`check-secrets`), or generating unique secrets for other services that need them (`generate-secrets`).
---
## getting started
### setup
Make a backblaze B2 account for backups. Add the secrets to ./secrets/backup/.
Fill out env.template and make sure to pass it in the next command
### running
Helper scripts can be found in [the scripts directory](./scripts)
To start
./scripts/up
To stop, you can press ctrl+c, or in another terminal run
./scripts/down
To generate secrets for all services ahead-of-time
./scripts/generate-secrets
### port forwarding
The caddy service expects to be able to bind to ports 80 and 443
One simple way is to allow unprivileged users access to these low ports
If you are on linux, you can run
$ sudo sysctl -w net.ipv4.ip_unprivileged_port_start=80
$ echo 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee -a /etc/sysctl.conf
The first command will set privileges until reboot. The second will make those privileges permanent.
If you are on macOS, using podman, you will want to run those commands in the linux virtual machine
$ podman machine ssh
core@localhost:~$ echo 'net.ipv4.ip_unprivileged_port_start=80' | sudo tee -a /etc/sysctl.conf
core@localhost:~$ sudo sysctl -w net.ipv4.ip_unprivileged_port_start=80
---
## design
All the services are defined by docker compose files.
We provide `backup-files`, `backup-database`, `check-secrets`, and `generate-secrets` helper services.
We have configured Caddy to import all files found in /etc/caddy.d/, so if you want to add a new service, you will need to make a small `Proxyfile` to tell caddy what subdomain to forward to what port.
See [the services readme](./services/readme.md) for a guide on adding a new service.
---
## roadmap
### alpha
- [ ] decide on single postgres instance or multiple
- [ ] postgres backup (duplicity)
- [ ] single sign-on for webdav (one user per folder)
- [ ] single sign-on for one more service
- [x] identity provider (zitadel)
- [x] file backup (duplicity)
- [x] reverse proxy (caddy)
- [x] personal home pages (caddy-webdav)
- [x] setup notifications via smtp
### beta
- [ ] file restore
- [ ] postgres restore
- [ ] wiki
- [ ] matrix server (dendrite)
- [ ] mail server (stalwart or maddy)
- [ ] mailing list (listmonk)
- [ ] code forge (gitea or forgejo)
### 0.1
- [ ] only expose 443, 587, 993
- [ ] running on beta.woodbine.nyc
- [ ] audit on secrets management
- [ ] audit on mail server
- [ ] audit on general architecture
## credits
thank you https://hackerspace.zone

@ -1,8 +0,0 @@
#!/bin/sh
# Tear down every service stack defined across the compose files and
# remove their named volumes.  Set ENV_FILE to use an environment file
# other than the default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  down --volumes

@ -1,8 +0,0 @@
#!/bin/sh
# Run `podman compose exec` against the full set of service stacks,
# forwarding all arguments (e.g. `./exec zitadel sh`).  Set ENV_FILE to
# use an environment file other than the default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  exec "$@"

@ -1,4 +0,0 @@
#!/bin/sh
# Generate the secrets required by the zitadel auth service.
# NOTE(review): this unconditionally overwrites any existing secrets;
# callers are expected to guard against clobbering a live deployment.
echo generating zitadel secrets; {
  # Make sure the destination directory exists before writing into it.
  mkdir -p secrets/auth/zitadel
  # `>!` is zsh-only force-overwrite syntax; in POSIX sh/bash it would
  # redirect to a file literally named `!`.  Plain `>` is portable.
  # `openssl rand -hex` appends a newline; strip it so the secret files
  # contain bare hex.
  openssl rand -hex 16 | tr -d '\n' > secrets/auth/zitadel/MASTER_KEY
  openssl rand -hex 32 | tr -d '\n' > secrets/auth/zitadel/STORAGE_PASSWORD
}

@ -1,8 +0,0 @@
#!/bin/sh
# List the containers of every service stack defined across the compose
# files.  Set ENV_FILE to use an environment file other than the
# default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  ps

@ -1,8 +0,0 @@
#!/bin/sh
# Pull the latest images for every service stack defined across the
# compose files.  Set ENV_FILE to use an environment file other than
# the default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  pull

@ -1,8 +0,0 @@
#!/bin/sh
# Run a one-off command in a service container, forwarding all
# arguments to `podman compose run`.  Set ENV_FILE to use an
# environment file other than the default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  run "$@"

@ -1,8 +0,0 @@
#!/bin/sh
# Build and start every service stack defined across the compose files
# (foreground; Ctrl-C stops them).  Set ENV_FILE to use an environment
# file other than the default .env.
# The expansion is quoted so an ENV_FILE path containing spaces is not
# split into multiple arguments.
podman compose --env-file "${ENV_FILE:-.env}" \
  --file services/secrets.yaml \
  --file services/backup.yaml \
  --file services/proxy.yaml \
  --file services/auth.yaml \
  --file services/web.yaml \
  --file services/git.yaml \
  up --build

@ -1,3 +0,0 @@
Do not check in anything in this directory.
See ../services/secrets.yaml for how to verify that secrets are defined, or how to generate them at startup.

@ -1,69 +0,0 @@
# Auth stack: zitadel identity provider backed by a single-node
# CockroachDB, published through the shared caddy reverse proxy.
# The MASTER_KEY secret is produced by the generate-secrets service.
secrets:
  MASTER_KEY:
    file: ../secrets/auth/zitadel/MASTER_KEY

services:
  # Expose the auth data directory (read-only) to the backup service.
  backup:
    volumes:
      - ../data/auth:/mnt/backup/src/auth:ro

  generate-secrets:
    volumes:
      - ../secrets/auth/zitadel/MASTER_KEY:/secrets/auth/zitadel/MASTER_KEY

  zitadel:
    restart: unless-stopped
    image: ghcr.io/zitadel/zitadel:v2.48.3
    environment:
      ZITADEL_DATABASE_COCKROACH_HOST: crdb
      # Compose `environment` map values must be strings: bare true/443
      # would be parsed as YAML bool/int and rejected by the spec.
      ZITADEL_EXTERNALSECURE: "true"
      ZITADEL_EXTERNALDOMAIN: auth.${DOMAIN}
      ZITADEL_EXTERNALPORT: "443"
      ZITADEL_WEBAUTHN_NAME: ${DOMAIN}
      ZITADEL_FIRSTINSTANCE_ORG_NAME: basement
      ZITADEL_FIRSTINSTANCE_ORG_HUMAN_USERNAME: ${ADMIN_USER}
      ZITADEL_FIRSTINSTANCE_ORG_HUMAN_PASSWORD: ${ADMIN_PASS}
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_HOST: "${SMTP_ADDR}:${SMTP_PORT}"
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_USER: ${SMTP_USER}
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_PASSWORD: ${SMTP_PASS}
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_SSL: "true"
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM: basement@mail.${DOMAIN}
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROMNAME: basement
      ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_REPLYTOADDRESS: basement@mail.${DOMAIN}
    secrets:
      - MASTER_KEY
    # TLS is terminated by caddy, so zitadel runs with --tlsMode external.
    command: start-from-init --masterkeyFile /run/secrets/MASTER_KEY --tlsMode external
    depends_on:
      generate-secrets:
        condition: service_completed_successfully
      caddy:
        condition: service_healthy
      crdb:
        condition: service_healthy
    ports:
      - "8080:8080"

  crdb:
    restart: unless-stopped
    image: cockroachdb/cockroach:latest-v23.1
    depends_on:
      generate-secrets:
        condition: service_completed_successfully
    command: start-single-node --insecure --store=path=/cockroach/cockroach-data,size=20%
    healthcheck:
      test: ["CMD", "curl", "--fail", "http://localhost:8080/health?ready=1"]
      interval: 10s
      timeout: 30s
      retries: 5
      start_period: 20s
    ports:
      # 9090 → CockroachDB admin UI; 26257 → SQL wire protocol.
      - "9090:8080"
      - "26257:26257"
    volumes:
      - ../data/auth/crdb/data:/cockroach/cockroach-data:rw

  # Drop the proxy rule into caddy's import directory so the shared
  # proxy routes the auth subdomain to zitadel.
  caddy:
    volumes:
      - ./auth/Proxyfile:/etc/caddy.d/zitadel:ro

@ -1,4 +0,0 @@
# Route the auth subdomain to the zitadel container's HTTP port.
# `tls internal` serves a certificate from Caddy's local CA —
# NOTE(review): presumably the public-facing cert is handled elsewhere;
# confirm before exposing this listener directly.
auth.{$DOMAIN}:443 {
  reverse_proxy zitadel:8080
  tls internal
}

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save