89 Commits

Author SHA1 Message Date
35cdbae969 Update authentik 2025-07-19 11:10:28 +05:30
d2ec985c38 Update authentik 2025-07-19 11:02:01 +05:30
36e389329b Merge pull request 'Update docker.io/kyantech/palmr Docker tag to v3.1.3' (#18) from renovate/docker.io-kyantech-palmr-3.x into main
Reviewed-on: #18
2025-07-19 10:43:44 +05:30
9fc4e8b07e Merge pull request 'Update docker.io/minio/minio Docker tag to RELEASE.2025-07-18T21-56-31Z' (#20) from renovate/docker.io-minio-minio-2025.x into main
Reviewed-on: #20
2025-07-19 10:41:43 +05:30
Renovate Bot
cd20ad7ba9 Update docker.io/minio/minio Docker tag to RELEASE.2025-07-18T21-56-31Z 2025-07-19 05:10:30 +00:00
45c64ffdbf Update renovate for minio 2025-07-19 10:39:21 +05:30
4782feea47 Update renovate 2025-07-19 10:33:27 +05:30
Renovate Bot
a149ea683e Update docker.io/kyantech/palmr Docker tag to v3.1.3 2025-07-18 16:01:20 +00:00
356213da56 Update authentik 2025-07-17 18:47:50 +05:30
0e67040a34 Unarchive authentik 2025-07-17 18:10:34 +05:30
630e6a00a4 Add glance 2025-07-17 12:43:36 +05:30
e1ecdd2915 Add adguardhome-sync 2025-07-17 12:32:24 +05:30
4e4b266fd2 Archive gitea-mirror 2025-07-17 12:24:36 +05:30
5544d1ae9b Add gickup 2025-07-17 12:23:41 +05:30
00984b645b Change portainer to alpine and add healthchecks 2025-07-16 23:43:12 +05:30
a0eff55c47 Change portainer agent to alpine 2025-07-16 23:42:51 +05:30
847dd97780 Add healthchecks for syncyomi 2025-07-16 23:06:20 +05:30
85c2f99712 Increase healthcheck startup time for stirling-pdf 2025-07-16 23:01:18 +05:30
3d913f239b Update stirling-pdf healthchecks 2025-07-16 22:56:32 +05:30
55227d245e Add healthcheck for stirling-pdf 2025-07-16 22:54:51 +05:30
0a956ac586 Update comments for minio 2025-07-16 22:43:25 +05:30
e0012c2109 Update comments for homeassisstant 2025-07-16 22:42:38 +05:30
53076168b0 Add healthcheck for homeassisstant 2025-07-16 22:42:04 +05:30
5765d57aa1 Add healthcheck for minio 2025-07-16 22:35:04 +05:30
a495061034 Merge pull request 'Update docker.io/kyantech/palmr Docker tag to v3.1.2' (#16) from renovate/docker.io-kyantech-palmr-3.x into main
Reviewed-on: #16
2025-07-16 21:55:33 +05:30
Renovate Bot
1a1d8c2807 Update docker.io/kyantech/palmr Docker tag to v3.1.2 2025-07-16 16:01:43 +00:00
b03f93dcdf Add palmr with minio 2025-07-16 20:08:41 +05:30
78988cf36e Update palmr 2025-07-16 20:08:41 +05:30
1d40096c2d Merge pull request 'Update docker.io/homeassistant/home-assistant Docker tag to v2025.7.2' (#13) from renovate/docker.io-homeassistant-home-assistant-2025.x into main
Reviewed-on: #13
2025-07-15 18:08:14 +00:00
e18a395c34 Merge pull request 'Update ghcr.io/linuxserver/healthchecks Docker tag to v3.10.20250714' (#14) from renovate/ghcr.io-linuxserver-healthchecks-3.x into main
Reviewed-on: #14
2025-07-15 18:08:02 +00:00
74bdc53c26 Merge pull request 'Update docker.io/gitea/gitea Docker tag to v1.24.3' (#15) from renovate/docker.io-gitea-gitea-1.x into main
Reviewed-on: #15
2025-07-15 18:07:46 +00:00
Renovate Bot
b0da2fde9a Update docker.io/gitea/gitea Docker tag to v1.24.3 2025-07-15 14:32:03 +00:00
070a323d86 Update renovate 2025-07-14 23:31:25 +05:30
16df6a7ccc Remove mariadb for gitea and opengist 2025-07-14 23:22:39 +05:30
7e97b0d466 Update renovate for minio 2025-07-14 23:22:15 +05:30
Renovate Bot
9bc9facc7d Update docker.io/homeassistant/home-assistant Docker tag to v2025.7.2 2025-07-14 17:45:31 +00:00
49129a60b7 Update comments for nextcloud 2025-07-14 23:06:24 +05:30
5bcb057d44 Remove network for nextcloud 2025-07-14 23:03:48 +05:30
60edd88542 Add network for caddy 2025-07-14 22:57:49 +05:30
a1be0f3ae7 Remove network for caddy 2025-07-14 22:42:15 +05:30
cfd78c5a00 Remove network and reorder caddy 2025-07-14 22:33:49 +05:30
d283ac7f05 Archive unused caddy 2025-07-14 22:27:11 +05:30
3e3bf65459 Update homeassistant 2025-07-14 22:11:32 +05:30
c86d5d13aa Remove networks v3 2025-07-14 22:06:45 +05:30
3e219530e8 Remove network for uptimekuma 2025-07-14 21:57:28 +05:30
add41c8ee4 Remove networks v2 2025-07-14 21:51:12 +05:30
0af06e7078 Remove networks 2025-07-14 21:42:54 +05:30
685f5788df Unarchive minio 2025-07-14 21:30:48 +05:30
1fd2481b1d Update minio for community console 2025-07-14 21:30:31 +05:30
Renovate Bot
897f49186f Update ghcr.io/linuxserver/healthchecks Docker tag to v3.10.20250714 2025-07-14 14:32:43 +00:00
dc9d7b2097 Archive minio 2025-07-14 17:59:23 +05:30
51009ecfff Update minio 2025-07-14 13:24:01 +05:30
73b8c09468 Update stirling-pdf 2025-07-14 13:05:02 +05:30
ccfdf2ca83 Archive vaultwarden 2025-07-14 12:54:07 +05:30
ff5ae47bef Adding comprehensive comments 2025-07-14 12:48:15 +05:30
e57dfa763e Add minio 2025-07-13 22:11:55 +05:30
7870863b52 Update compose file orders 2025-07-13 18:43:04 +05:30
fb28d1ce31 Archive joplin 2025-07-13 18:36:42 +05:30
932f3014d7 Merge pull request 'Update docker.io/kyantech/palmr Docker tag to v3.1.1' (#11) from renovate/docker.io-kyantech-palmr-3.x into main
Reviewed-on: #11
2025-07-13 16:28:07 +05:30
06be4a1588 Merge pull request 'Update docker.io/library/nextcloud Docker tag to v31.0.7' (#12) from renovate/docker.io-library-nextcloud-31.x into main
Reviewed-on: #12
2025-07-13 16:27:56 +05:30
9e118b2e6e Update stirling-pdf 2025-07-13 16:23:55 +05:30
00640c19e3 Add ssh for opengist 2025-07-12 11:58:31 +05:30
Renovate Bot
f8ce60e578 Update docker.io/library/nextcloud Docker tag to v31.0.7 2025-07-12 02:31:24 +00:00
Renovate Bot
2e1954f2b1 Update docker.io/kyantech/palmr Docker tag to v3.1.1 2025-07-11 15:19:38 +00:00
62c8d68dfd Decouple network creation from portainer 2025-07-11 11:12:07 +05:30
ca0f5f774b Merge pull request 'Update ghcr.io/crowdsecurity/crowdsec Docker tag to v1.6.10' (#10) from renovate/ghcr.io-crowdsecurity-crowdsec-1.x into main
Reviewed-on: #10
2025-07-11 10:10:53 +05:30
Renovate Bot
1284650634 Update ghcr.io/crowdsecurity/crowdsec Docker tag to v1.6.10 2025-07-11 04:39:37 +00:00
d00e4428e7 Merge pull request 'Update codeberg.org/forgejo/forgejo Docker tag to v11.0.3' (#8) from renovate/codeberg.org-forgejo-forgejo-11.x into main
Reviewed-on: #8
2025-07-10 21:54:06 +05:30
21733f014f Merge pull request 'Update docker.io/valkey/valkey Docker tag to v8.1.3' (#9) from renovate/docker.io-valkey-valkey-8.x into main
Reviewed-on: #9
2025-07-10 21:53:17 +05:30
Renovate Bot
23fb579361 Update docker.io/valkey/valkey Docker tag to v8.1.3 2025-07-10 14:31:46 +00:00
Renovate Bot
38d3f023c2 Update codeberg.org/forgejo/forgejo Docker tag to v11.0.3 2025-07-10 14:31:41 +00:00
cd18d8f287 Update caddy networking 2025-07-10 17:30:27 +05:30
b7a9448784 Update nextcloud 2025-07-10 17:05:45 +05:30
34f99c64ce Update nextcloud cron 2025-07-10 16:56:03 +05:30
ac6e1d3fb8 Update Nextcloud cron 2025-07-10 16:51:02 +05:30
82f81d3ad0 Update nextcloud valkey log level 2025-07-10 16:49:02 +05:30
e0679e8104 Add cron for nextcloud 2025-07-10 16:47:19 +05:30
ed12d943df Update nextcloud 2025-07-10 16:05:52 +05:30
e9b47943ee Add mailer to joplin 2025-07-10 12:31:31 +05:30
d5500ae2e2 Update db name in joplin 2025-07-10 12:12:01 +05:30
100cb60c23 Move joplin out of archive 2025-07-10 12:06:19 +05:30
a15b5128c6 Add homepage 2025-07-10 12:04:36 +05:30
5036221e3e Update joplin 2025-07-10 12:04:25 +05:30
bd8b33ce38 Add joplin 2025-07-09 20:13:21 +05:30
802a317bb0 Update archived folder name 2025-07-09 20:00:46 +05:30
d0123c8dc0 Update Vaultwarden from initial config 2025-07-09 19:37:32 +05:30
fdef370242 Merge pull request 'Update ghcr.io/stirling-tools/stirling-pdf Docker tag to v1.0.2' (#7) from renovate/ghcr.io-stirling-tools-stirling-pdf-1.x into main
Reviewed-on: #7
2025-07-09 14:07:39 +05:30
bbef7a4093 Merge branch 'main' into renovate/ghcr.io-stirling-tools-stirling-pdf-1.x 2025-07-09 14:07:05 +05:30
Renovate Bot
b8982eb93d Update ghcr.io/stirling-tools/stirling-pdf Docker tag to v1.0.2 2025-07-09 02:31:35 +00:00
62 changed files with 1277 additions and 630 deletions

View File

@@ -0,0 +1,22 @@
# Docker Compose configuration for AdGuardHome Sync service
services:
  adguardhome-sync:
    # Basic container configuration
    container_name: adguardhome-sync
    image: docker.io/linuxserver/adguardhome-sync:0.7.6
    restart: unless-stopped
    # Environment configuration
    environment:
      - PUID=${PUID}  # User ID for the container
      - PGID=${PGID}  # Group ID for the container
      - TZ=${TZ}  # Timezone
    # Persistent storage configuration
    volumes:
      - ${CONFIG_PATH}:/config  # Maps host config directory to container
    # Network port configuration
    ports:
      - "${PORT}:8080"  # Web interface port (quoted to avoid YAML sexagesimal parsing)

View File

@@ -1,6 +1,6 @@
services: services:
crowdsec: crowdsec:
image: ghcr.io/crowdsecurity/crowdsec:v1.6.9 image: ghcr.io/crowdsecurity/crowdsec:v1.6.10
container_name: crowdsec container_name: crowdsec
environment: environment:
- BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY} - BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY}
@@ -12,7 +12,7 @@ services:
- ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/ - ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/
- ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro - ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro
networks: networks:
- backend - frontend
restart: unless-stopped restart: unless-stopped
healthcheck: healthcheck:
test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"] test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"]
@@ -44,11 +44,8 @@ services:
- ${HTTPS_PORT}:443/udp - ${HTTPS_PORT}:443/udp
networks: networks:
- frontend - frontend
- backend
restart: unless-stopped restart: unless-stopped
networks: networks:
frontend: frontend:
external: true external: true
backend:
external: true

View File

@@ -0,0 +1,18 @@
# Gickup service configuration
services:
  gickup:
    # Basic container configuration
    container_name: gickup
    image: ghcr.io/cooperspencer/gickup:0.10.38
    restart: unless-stopped
    # Command to run when the container starts
    command: ["/gickup/conf.yml"]  # Points to the configuration file inside the container
    # Environment variables
    environment:
      - TZ=${TZ}  # Timezone
    # Persistent storage configuration
    volumes:
      - ${APPDATA_PATH}/gickup/conf.yml:/gickup/conf.yml  # Host config mounted into the container

View File

@@ -0,0 +1,32 @@
# Gitea Mirror Service Configuration
services:
  gitea-mirror:
    # Basic container configuration
    container_name: gitea-mirror
    image: ghcr.io/raylabshq/gitea-mirror:v2.22.0
    restart: unless-stopped
    user: "${PUID}:${PGID}"  # Runs as specified user/group (quoted: value contains a colon)
    # Application environment configuration
    environment:
      - NODE_ENV=production  # Runtime environment
      - DATABASE_URL=file:data/gitea-mirror.db  # SQLite database location
      - HOST=0.0.0.0  # Binding address
      - PORT=4321  # Internal container port
      - JWT_SECRET=${JWT_SECRET}  # Authentication secret
    # Persistent storage configuration
    volumes:
      - ${APPDATA_PATH}/gitea-mirror/data:/app/data  # Application data storage
    # Network port configuration
    ports:
      - "${PORT}:4321"  # Maps host port to container
    # Health check configuration
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=3", "--spider", "http://localhost:4321/api/health"]
      interval: 30s  # Check interval
      timeout: 10s  # Check timeout
      retries: 5  # Allowed retries
      start_period: 15s  # Initial delay before checks

View File

@@ -0,0 +1,22 @@
# Glance - A modern dashboard for your self-hosted services
# Documentation: https://glanceapp.io/docs
services:
  glance:
    # Basic container configuration
    container_name: glance
    image: glanceapp/glance:v0.8.4  # Official Glance image with version
    restart: unless-stopped  # Automatically restart unless explicitly stopped
    # Environment variables configuration
    environment:
      - MY_SECRET_TOKEN=${MY_SECRET_TOKEN}  # Secret token for API authentication
    # Persistent storage configuration
    volumes:
      - ${APPDATA_PATH}/glance/config:/app/config  # Configuration files
      - ${APPDATA_PATH}/glance/assets:/app/assets  # Static assets and cache
    # Network configuration
    ports:
      - "${PORT}:8080"  # Map host port to container port (host:container)

105
.archived/glance/glance.yml Normal file
View File

@@ -0,0 +1,105 @@
# Glance dashboard configuration — a single "Home" page with three columns.
pages:
  - name: Home
    # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look
    # hide-desktop-navigation: true
    columns:
      - size: small
        widgets:
          - type: calendar
            first-day-of-week: monday
          - type: rss
            limit: 10
            collapse-after: 3
            cache: 12h
            feeds:
              - url: https://selfh.st/rss/
                title: selfh.st
                limit: 4
              - url: https://ciechanow.ski/atom.xml
              - url: https://www.joshwcomeau.com/rss.xml
                title: Josh Comeau
              - url: https://samwho.dev/rss.xml
              - url: https://ishadeed.com/feed.xml
                title: Ahmad Shadeed
          - type: twitch-channels
            channels:
              - theprimeagen
              - j_blow
              - piratesoftware
              - cohhcarnage
              - christitustech
              - EJ_SA
      - size: full
        widgets:
          - type: group
            widgets:
              - type: hacker-news
              - type: lobsters
          - type: videos
            channels:
              - UCXuqSBlHAE6Xw-yeJA0Tunw  # Linus Tech Tips
              - UCR-DXc1voovS8nhAvccRZhg  # Jeff Geerling
              - UCsBjURrPoezykLs9EqgamOA  # Fireship
              - UCBJycsmduvYEL83R_U4JriQ  # Marques Brownlee
              - UCHnyfMqiRRG1u-2MsSQLbXA  # Veritasium
          - type: group
            widgets:
              - type: reddit
                subreddit: technology
                show-thumbnails: true
              - type: reddit
                subreddit: selfhosted
                show-thumbnails: true
      - size: small
        widgets:
          - type: weather
            location: London, United Kingdom
            units: metric  # alternatively "imperial"
            hour-format: 12h  # alternatively "24h"
            # Optionally hide the location from being displayed in the widget
            # hide-location: true
          - type: markets
            markets:
              - symbol: SPY
                name: S&P 500
              - symbol: BTC-USD
                name: Bitcoin
              - symbol: NVDA
                name: NVIDIA
              - symbol: AAPL
                name: Apple
              - symbol: MSFT
                name: Microsoft
          - type: releases
            cache: 1d
            # Without authentication the Github API allows for up to 60 requests per hour. You can create a
            # read-only token from your Github account settings and use it here to increase the limit.
            # token: ...
            repositories:
              - glanceapp/glance
              - go-gitea/gitea
              - immich-app/immich
              - syncthing/syncthing

# Add more pages here:
# - name: Your page name
#   columns:
#     - size: small
#       widgets:
#         # Add widgets here
#     - size: full
#       widgets:
#         # Add widgets here
#     - size: small
#       widgets:
#         # Add widgets here

88
.archived/glance/home.yml Normal file
View File

@@ -0,0 +1,88 @@
# Glance page definition for the "Home" page (included by the main config).
- name: Home
  # Optionally, if you only have a single page you can hide the desktop navigation for a cleaner look
  # hide-desktop-navigation: true
  columns:
    - size: small
      widgets:
        - type: calendar
          first-day-of-week: monday
        - type: rss
          limit: 10
          collapse-after: 3
          cache: 12h
          feeds:
            - url: https://selfh.st/rss/
              title: selfh.st
            - url: https://ciechanow.ski/atom.xml
            - url: https://www.joshwcomeau.com/rss.xml
              title: Josh Comeau
            - url: https://samwho.dev/rss.xml
            - url: https://ishadeed.com/feed.xml
              title: Ahmad Shadeed
        - type: twitch-channels
          channels:
            - theprimeagen
            - j_blow
            - piratesoftware
            - cohhcarnage
            - christitustech
            - EJ_SA
    - size: full
      widgets:
        - type: group
          widgets:
            - type: hacker-news
            - type: lobsters
        - type: videos
          channels:
            - UCXuqSBlHAE6Xw-yeJA0Tunw  # Linus Tech Tips
            - UCR-DXc1voovS8nhAvccRZhg  # Jeff Geerling
            - UCsBjURrPoezykLs9EqgamOA  # Fireship
            - UCBJycsmduvYEL83R_U4JriQ  # Marques Brownlee
            - UCHnyfMqiRRG1u-2MsSQLbXA  # Veritasium
        - type: group
          widgets:
            - type: reddit
              subreddit: technology
              show-thumbnails: true
            - type: reddit
              subreddit: selfhosted
              show-thumbnails: true
    - size: small
      widgets:
        - type: weather
          location: London, United Kingdom
          units: metric  # alternatively "imperial"
          hour-format: 12h  # alternatively "24h"
          # Optionally hide the location from being displayed in the widget
          # hide-location: true
        - type: markets
          markets:
            - symbol: SPY
              name: S&P 500
            - symbol: BTC-USD
              name: Bitcoin
            - symbol: NVDA
              name: NVIDIA
            - symbol: AAPL
              name: Apple
            - symbol: MSFT
              name: Microsoft
        - type: releases
          cache: 1d
          # Without authentication the Github API allows for up to 60 requests per hour. You can create a
          # read-only token from your Github account settings and use it here to increase the limit.
          # token: ...
          repositories:
            - glanceapp/glance
            - go-gitea/gitea
            - immich-app/immich
            - syncthing/syncthing

View File

@@ -0,0 +1,38 @@
# Homepage dashboard with a read-only Docker socket proxy.
services:
  dockerproxy:
    image: ghcr.io/tecnativa/docker-socket-proxy:0.3.0
    container_name: dockerproxy
    environment:
      - CONTAINERS=${CONTAINERS}  # Allow access to viewing containers
      - SERVICES=${SERVICES}  # Allow access to viewing services (necessary when using Docker Swarm)
      - TASKS=${TASKS}  # Allow access to viewing tasks (necessary when using Docker Swarm)
      - POST=${POST}  # Disallow any POST operations (effectively read-only)
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro  # Socket mounted read-only
    ports:
      - "${PROXY_PORT}:2375"
    networks:
      - backend
    restart: unless-stopped
  homepage:
    image: ghcr.io/gethomepage/homepage:v1.3.2
    container_name: homepage
    environment:
      - PUID=${PUID}
      - PGID=${PGID}
      - HOMEPAGE_ALLOWED_HOSTS=${HOMEPAGE_ALLOWED_HOSTS}
    volumes:
      - ${APPDATA_PATH}/homepage/config:/app/config
    ports:
      - "${APP_PORT}:3000"
    networks:
      - frontend
      - backend
    restart: unless-stopped
networks:
  frontend:
    external: true
  backend:
    external: true

View File

@@ -0,0 +1,57 @@
# Joplin sync server backed by PostgreSQL.
services:
  joplin_db:
    image: docker.io/library/postgres:17.5
    container_name: joplin_db
    environment:
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - POSTGRES_DB=${POSTGRES_DB}
    volumes:
      - ${APPDATA_PATH}/joplin/db:/var/lib/postgresql/data
    ports:
      - "${DB_PORT}:5432"
    networks:
      - backend
    restart: unless-stopped
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
      start_period: 10s
      interval: 10s
      timeout: 5s
      retries: 3
  joplin_server:
    image: docker.io/joplin/server:3.3.13
    container_name: joplin_server
    depends_on:
      joplin_db:
        condition: service_healthy  # Wait until PostgreSQL passes its health check
    environment:
      - APP_PORT=22300
      - APP_BASE_URL=${APP_BASE_URL}
      - DB_CLIENT=pg
      - POSTGRES_HOST=joplin_db
      - POSTGRES_PORT=5432
      - POSTGRES_DATABASE=${POSTGRES_DB}
      - POSTGRES_USER=${POSTGRES_USER}
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
      - MAILER_ENABLED=${MAILER_ENABLED}
      - MAILER_HOST=${MAILER_HOST}
      - MAILER_PORT=${MAILER_PORT}
      - MAILER_SECURITY=${MAILER_SECURITY}
      - MAILER_AUTH_USER=${MAILER_AUTH_USER}
      - MAILER_AUTH_PASSWORD=${MAILER_AUTH_PASSWORD}
      - MAILER_NOREPLY_NAME=${MAILER_NOREPLY_NAME}
      - MAILER_NOREPLY_EMAIL=${MAILER_NOREPLY_EMAIL}
    ports:
      - "${APP_PORT}:22300"
    networks:
      - frontend
      - backend
    restart: unless-stopped
networks:
  frontend:
    external: true
  backend:
    external: true

View File

@@ -0,0 +1,71 @@
# Vaultwarden Configuration - (Bitwarden-compatible) Password Manager
services:
  vaultwarden_db:
    # PostgreSQL Database Configuration
    container_name: vaultwarden_db
    image: docker.io/library/postgres:17.5
    restart: unless-stopped  # Auto-recover from crashes
    # Database credentials
    environment:
      - POSTGRES_USER=${POSTGRES_USER}  # Database username
      - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}  # Database password
      - POSTGRES_DB=${POSTGRES_DB}  # Database name
    # Persistent storage configuration
    volumes:
      - ${APPDATA_PATH}/vaultwarden/db:/var/lib/postgresql/data  # Database files
    # Network configuration
    ports:
      - "${DB_PORT}:5432"  # PostgreSQL default port
    networks:
      - backend  # Connects to backend network
    # Health monitoring
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]  # Connection check
      interval: 30s  # Check every 30 seconds
      timeout: 5s  # Maximum check duration
      retries: 5  # Allow 5 failures before marking unhealthy
      start_period: 20s  # Initial grace period
  vaultwarden_server:
    container_name: vaultwarden_server
    # Container configuration
    image: ghcr.io/dani-garcia/vaultwarden:1.34.1  # Official Vaultwarden image
    restart: unless-stopped  # Auto-restart on failure
    depends_on:
      vaultwarden_db:
        condition: service_healthy  # Wait for healthy database
    # Application settings
    environment:
      - PUID=${PUID}  # User ID for file permissions
      - PGID=${PGID}  # Group ID for file permissions
      - TZ=${TZ}  # Timezone configuration
      - DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@vaultwarden_db:5432/${POSTGRES_DB}  # DB connection
      - WEBSOCKET_ENABLED=${WEBSOCKET_ENABLED}  # Real-time updates
      - LOG_FILE=/data/vaultwarden.log  # Log file location
      # Uncomment and set these only on first run
      # - DOMAIN=${DOMAIN}  # Domain Name
      # - SIGNUPS_ALLOWED=${SIGNUPS_ALLOWED}  # User registration
      # - ADMIN_TOKEN=${ADMIN_TOKEN}  # Admin interface access token
    # Persistent storage configuration
    volumes:
      - ${APPDATA_PATH}/vaultwarden/data:/data  # Vault data storage
    # Network configuration
    ports:
      - "${SERVER_PORT}:80"  # Web interface port
    networks:
      - frontend  # Connects to frontend network
      - backend  # Connects to backend network
# External network definitions
networks:
  frontend:
    external: true  # Uses existing frontend network
  backend:
    external: true  # Uses existing backend network

View File

@@ -1,13 +1,11 @@
# Adminer Database Management Tool Configuration
services: services:
adminer: adminer:
image: docker.io/library/adminer:5.3.0 # Basic container configuration
container_name: adminer container_name: adminer
ports: image: docker.io/library/adminer:5.3.0
- ${PORT}:8080
networks:
- backend
restart: unless-stopped restart: unless-stopped
networks: # Network port configuration
backend: ports:
external: true - ${PORT}:8080 # Maps host port to Adminer web interface

View File

@@ -1,57 +1,68 @@
# Authentik Identity Provider Configuration
services: services:
authentik_postgres: authentik_db:
image: docker.io/library/postgres:17.5 container_name: authentik_db
container_name: authentik_postgres image: docker.io/library/postgres:17.5-alpine
restart: unless-stopped
environment: environment:
- POSTGRES_DB=${POSTGRES_DB} # Database configuration
- POSTGRES_USER=${POSTGRES_USER} - POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
volumes: volumes:
- ${APPDATA_PATH}/authentik/db:/var/lib/postgresql/data - ${APPDATA_PATH}/authentik/db:/var/lib/postgresql/data
ports: ports:
- ${POSTGRES_PORT}:5432 - ${POSTGRES_PORT}:5432
restart: unless-stopped
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"] test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
start_period: 20s start_period: 10s
interval: 30s interval: 5s
retries: 5 retries: 5
timeout: 5s timeout: 5s
authentik_valkey: authentik_valkey:
image: docker.io/valkey/valkey:8.1.2
container_name: authentik_valkey container_name: authentik_valkey
command: valkey-server --save 60 1 --loglevel warning --requirepass ${VALKEY_PASSWORD} image: docker.io/valkey/valkey:8.1.3-alpine
restart: unless-stopped
command: valkey-server --save 60 1 --requirepass ${VALKEY_PASSWORD}
volumes: volumes:
- ${APPDATA_PATH}/authentik/valkey:/data - ${APPDATA_PATH}/authentik/valkey:/data
ports:
- ${VALKEY_PORT}:6379
restart: unless-stopped
healthcheck: healthcheck:
test: ["CMD-SHELL", "echo 'auth ${VALKEY_PASSWORD}\nping' | valkey-cli | grep PONG"] test: ["CMD-SHELL", "echo 'auth ${VALKEY_PASSWORD}\nping' | valkey-cli | grep PONG"]
start_period: 20s start_period: 10s
interval: 30s interval: 5s
retries: 5 retries: 5
timeout: 3s timeout: 5s
authentik_server: authentik_server:
image: ghcr.io/goauthentik/server:2025.6.2
container_name: authentik_server container_name: authentik_server
image: ghcr.io/goauthentik/server:2025.6.3
restart: unless-stopped
depends_on: depends_on:
authentik_postgres: authentik_db:
condition: service_healthy condition: service_healthy
authentik_valkey: authentik_valkey:
condition: service_healthy condition: service_healthy
command: server command: server
environment: environment:
# Generic configuration
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY} - AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY}
- AUTHENTIK_POSTGRESQL__HOST=authentik_postgres - AUTHENTIK_DISABLE_STARTUP_ANALYTICS=${AUTHENTIK_DISABLE_STARTUP_ANALYTICS}
- AUTHENTIK_DISABLE_UPDATE_CHECK=${AUTHENTIK_DISABLE_UPDATE_CHECK}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING__ENABLED}
# Database configuration
- AUTHENTIK_POSTGRESQL__HOST=authentik_db
- AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB} - AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB}
- AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER} - AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER}
- AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD} - AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD}
# Valkey cache configuration
- AUTHENTIK_REDIS__HOST=authentik_valkey - AUTHENTIK_REDIS__HOST=authentik_valkey
- AUTHENTIK_REDIS__PASSWORD=${VALKEY_PASSWORD} - AUTHENTIK_REDIS__PASSWORD=${VALKEY_PASSWORD}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING__ENABLED}
# Email configuration
- AUTHENTIK_EMAIL__HOST=${AUTHENTIK_EMAIL__HOST} - AUTHENTIK_EMAIL__HOST=${AUTHENTIK_EMAIL__HOST}
- AUTHENTIK_EMAIL__PORT=${AUTHENTIK_EMAIL__PORT} - AUTHENTIK_EMAIL__PORT=${AUTHENTIK_EMAIL__PORT}
- AUTHENTIK_EMAIL__USERNAME=${AUTHENTIK_EMAIL__USERNAME} - AUTHENTIK_EMAIL__USERNAME=${AUTHENTIK_EMAIL__USERNAME}
@@ -66,26 +77,36 @@ services:
ports: ports:
- ${AUTHENTIK_HTTP_PORT}:9000 - ${AUTHENTIK_HTTP_PORT}:9000
- ${AUTHENTIK_HTTPS_PORT}:9443 - ${AUTHENTIK_HTTPS_PORT}:9443
restart: unless-stopped
authentik_worker: authentik_worker:
image: ghcr.io/goauthentik/server:2025.6.2
container_name: authentik_worker container_name: authentik_worker
image: ghcr.io/goauthentik/server:2025.6.3
restart: unless-stopped
depends_on: depends_on:
authentik_postgres: authentik_db:
condition: service_healthy condition: service_healthy
authentik_valkey: authentik_valkey:
condition: service_healthy condition: service_healthy
command: worker command: worker
user: ${PUID}:${PGID}
environment: environment:
# Generic configuration
- AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY} - AUTHENTIK_SECRET_KEY=${AUTHENTIK_SECRET_KEY}
- AUTHENTIK_POSTGRESQL__HOST=authentik_postgres - AUTHENTIK_DISABLE_STARTUP_ANALYTICS=${AUTHENTIK_DISABLE_STARTUP_ANALYTICS}
- AUTHENTIK_DISABLE_UPDATE_CHECK=${AUTHENTIK_DISABLE_UPDATE_CHECK}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING__ENABLED}
# Database configuration
- AUTHENTIK_POSTGRESQL__HOST=authentik_db
- AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB} - AUTHENTIK_POSTGRESQL__NAME=${POSTGRES_DB}
- AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER} - AUTHENTIK_POSTGRESQL__USER=${POSTGRES_USER}
- AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD} - AUTHENTIK_POSTGRESQL__PASSWORD=${POSTGRES_PASSWORD}
# Valkey cache configuration
- AUTHENTIK_REDIS__HOST=authentik_valkey - AUTHENTIK_REDIS__HOST=authentik_valkey
- AUTHENTIK_REDIS__PASSWORD=${VALKEY_PASSWORD} - AUTHENTIK_REDIS__PASSWORD=${VALKEY_PASSWORD}
- AUTHENTIK_ERROR_REPORTING__ENABLED=${AUTHENTIK_ERROR_REPORTING__ENABLED}
# Email configuration
- AUTHENTIK_EMAIL__HOST=${AUTHENTIK_EMAIL__HOST} - AUTHENTIK_EMAIL__HOST=${AUTHENTIK_EMAIL__HOST}
- AUTHENTIK_EMAIL__PORT=${AUTHENTIK_EMAIL__PORT} - AUTHENTIK_EMAIL__PORT=${AUTHENTIK_EMAIL__PORT}
- AUTHENTIK_EMAIL__USERNAME=${AUTHENTIK_EMAIL__USERNAME} - AUTHENTIK_EMAIL__USERNAME=${AUTHENTIK_EMAIL__USERNAME}
@@ -98,4 +119,3 @@ services:
- ${APPDATA_PATH}/authentik/media:/media - ${APPDATA_PATH}/authentik/media:/media
- ${APPDATA_PATH}/authentik/certs:/certs - ${APPDATA_PATH}/authentik/certs:/certs
- ${APPDATA_PATH}/authentik/custom-templates:/templates - ${APPDATA_PATH}/authentik/custom-templates:/templates
restart: unless-stopped

View File

@@ -1,19 +1,30 @@
# Caddy with Cloudflare DDNS and CrowdSec Security
services: services:
# CrowdSec - Security automation service that protects from attacks
crowdsec: crowdsec:
image: ghcr.io/crowdsecurity/crowdsec:v1.6.9
container_name: crowdsec container_name: crowdsec
image: ghcr.io/crowdsecurity/crowdsec:v1.6.10 # Official CrowdSec image
restart: unless-stopped # Auto-restart unless explicitly stopped
# Environment configuration
environment: environment:
- BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY} - BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY} # API key for Caddy integration
- GID=${GID} - GID=${GID} # Group ID for file permissions
- COLLECTIONS=${COLLECTIONS} - COLLECTIONS=${COLLECTIONS} # Security collections to enable
# Persistent storage volumes
volumes: volumes:
- ${APPDATA_PATH}/caddy/crowdsec-acquis.d:/etc/crowdsec/acquis.d - ${APPDATA_PATH}/caddy/crowdsec-acquis.d:/etc/crowdsec/acquis.d # Log parsers
- ${APPDATA_PATH}/caddy/crowdsec-db:/var/lib/crowdsec/data/ - ${APPDATA_PATH}/caddy/crowdsec-db:/var/lib/crowdsec/data/ # Security database
- ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/ - ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/ # Configuration files
- ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro - ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro # Read-only log access
# Networks
networks: networks:
- backend proxy:
restart: unless-stopped ipv4_address: 172.30.0.3
# Health check configuration
healthcheck: healthcheck:
test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"] test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"]
interval: 30s interval: 30s
@@ -21,34 +32,50 @@ services:
retries: 3 retries: 3
start_period: 30s start_period: 30s
# Caddy web server with Cloudflare DDNS integration
caddy: caddy:
image: docker.io/ryuupendragon/caddy-cloudflare-ddns-crowdsec:2.10.0
container_name: caddy container_name: caddy
image: docker.io/ryuupendragon/caddy-cloudflare-ddns-crowdsec:2.10.0 # Custom Caddy image with Cloudflare, DDNS and CrowdSec plugins
restart: unless-stopped # Auto-restart on failure
# Service dependencies
depends_on: depends_on:
crowdsec: crowdsec:
condition: service_healthy condition: service_healthy # Requires working CrowdSec before starting
# Required network capabilities
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN # Needed for network-level operations
# Environment configuration
environment: environment:
- CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN} - CLOUDFLARE_API_TOKEN=${CLOUDFLARE_API_TOKEN} # Cloudflare API token for DNS updates
- CROWDSEC_API_KEY=${CROWDSEC_API_KEY} - CROWDSEC_API_KEY=${CROWDSEC_API_KEY} # Security key for CrowdSec integration
# Persistent storage volumes
volumes: volumes:
- ${APPDATA_PATH}/caddy/caddy-file:/etc/caddy - ${APPDATA_PATH}/caddy/caddy-file:/etc/caddy # Caddyfile configuration
- ${APPDATA_PATH}/caddy/caddy-config:/config - ${APPDATA_PATH}/caddy/caddy-config:/config # Automatic HTTPS certificates
- ${APPDATA_PATH}/caddy/caddy-data:/data - ${APPDATA_PATH}/caddy/caddy-data:/data # Site data and assets
- ${APPDATA_PATH}/caddy/caddy-logs:/logs - ${APPDATA_PATH}/caddy/caddy-logs:/logs # Access logs
- ${APPDATA_PATH}/caddy/caddy-srv:/srv - ${APPDATA_PATH}/caddy/caddy-srv:/srv # Served content
# Network ports
ports: ports:
- ${HTTP_PORT}:80 - ${HTTP_PORT}:80 # HTTP traffic
- ${HTTPS_PORT}:443 - ${HTTPS_PORT}:443 # HTTPS traffic
- ${HTTPS_PORT}:443/udp - ${HTTPS_PORT}:443/udp # QUIC/HTTP3 support
# Networks
networks: networks:
- frontend proxy:
- backend ipv4_address: 172.30.0.2
restart: unless-stopped
networks: networks:
frontend: proxy:
external: true name: proxy
backend: driver: bridge
external: true ipam:
config:
- subnet: 172.30.0.0/16
gateway: 172.30.0.1

View File

@@ -1,19 +1,30 @@
# Caddy with CrowdSec Security
services: services:
# CrowdSec - Security automation service that protects from attacks
crowdsec: crowdsec:
image: ghcr.io/crowdsecurity/crowdsec:v1.6.9
container_name: crowdsec container_name: crowdsec
image: ghcr.io/crowdsecurity/crowdsec:v1.6.10 # Official CrowdSec image
restart: unless-stopped # Auto-restart unless explicitly stopped
# Environment configuration
environment: environment:
- BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY} - BOUNCER_KEY_CADDY=${CROWDSEC_API_KEY} # API key for Caddy integration
- GID=${GID} - GID=${GID} # Group ID for file permissions
- COLLECTIONS=${COLLECTIONS} - COLLECTIONS=${COLLECTIONS} # Security collections to enable
# Persistent storage volumes
volumes: volumes:
- ${APPDATA_PATH}/caddy/crowdsec-acquis.d:/etc/crowdsec/acquis.d - ${APPDATA_PATH}/caddy/crowdsec-acquis.d:/etc/crowdsec/acquis.d # Log parsers
- ${APPDATA_PATH}/caddy/crowdsec-db:/var/lib/crowdsec/data/ - ${APPDATA_PATH}/caddy/crowdsec-db:/var/lib/crowdsec/data/ # Security database
- ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/ - ${APPDATA_PATH}/caddy/crowdsec-config:/etc/crowdsec/ # Configuration files
- ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro - ${APPDATA_PATH}/caddy/caddy-logs:/var/log/caddy:ro # Read-only log access
# Networks
networks: networks:
- backend proxy:
restart: unless-stopped ipv4_address: 172.30.0.3
# Health check configuration
healthcheck: healthcheck:
test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"] test: ["CMD-SHELL", "wget --spider --quiet --tries=1 --timeout=5 http://localhost:8080/health > /dev/null 2>&1 || exit 1"]
interval: 30s interval: 30s
@@ -21,33 +32,49 @@ services:
retries: 3 retries: 3
start_period: 30s start_period: 30s
# Caddy web server with CrowdSec security
caddy: caddy:
image: docker.io/ryuupendragon/caddy-crowdsec:2.10.0
container_name: caddy container_name: caddy
image: docker.io/ryuupendragon/caddy-crowdsec:2.10.0 # Custom Caddy image with CrowdSec plugins
restart: unless-stopped # Auto-restart on failure
# Service dependencies
depends_on: depends_on:
crowdsec: crowdsec:
condition: service_healthy condition: service_healthy # Requires working CrowdSec before starting
# Required network capabilities
cap_add: cap_add:
- NET_ADMIN - NET_ADMIN # Needed for network-level operations
# Environment configuration
environment: environment:
- CROWDSEC_API_KEY=${CROWDSEC_API_KEY} - CROWDSEC_API_KEY=${CROWDSEC_API_KEY} # Security key for CrowdSec integration
# Persistent storage volumes
volumes: volumes:
- ${APPDATA_PATH}/caddy/caddy-file:/etc/caddy - ${APPDATA_PATH}/caddy/caddy-file:/etc/caddy # Caddyfile configuration
- ${APPDATA_PATH}/caddy/caddy-config:/config - ${APPDATA_PATH}/caddy/caddy-config:/config # Automatic HTTPS certificates
- ${APPDATA_PATH}/caddy/caddy-data:/data - ${APPDATA_PATH}/caddy/caddy-data:/data # Site data and assets
- ${APPDATA_PATH}/caddy/caddy-logs:/logs - ${APPDATA_PATH}/caddy/caddy-logs:/logs # Access logs
- ${APPDATA_PATH}/caddy/caddy-srv:/srv - ${APPDATA_PATH}/caddy/caddy-srv:/srv # Served content
# Network ports
ports: ports:
- ${HTTP_PORT}:80 - ${HTTP_PORT}:80 # HTTP traffic
- ${HTTPS_PORT}:443 - ${HTTPS_PORT}:443 # HTTPS traffic
- ${HTTPS_PORT}:443/udp - ${HTTPS_PORT}:443/udp # QUIC/HTTP3 support
# Networks
networks: networks:
- frontend proxy:
- backend ipv4_address: 172.30.0.2
restart: unless-stopped
networks: networks:
frontend: proxy:
external: true name: proxy
backend: driver: bridge
external: true ipam:
config:
- subnet: 172.30.0.0/16
gateway: 172.30.0.1

View File

@@ -1,19 +1,19 @@
# Forgejo Git Service Configuration
services: services:
forgejo: forgejo:
image: codeberg.org/forgejo/forgejo:11.0.2-rootless # Basic container configuration
container_name: forgejo container_name: forgejo
user: ${PUID}:${PGID} image: codeberg.org/forgejo/forgejo:11.0.3-rootless
volumes:
- ${APPDATA_PATH}/forgejo/config:/etc/gitea
- ${APPDATA_PATH}/forgejo/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- ${SERVER_PORT}:3000
networks:
- frontend
restart: unless-stopped restart: unless-stopped
user: ${PUID}:${PGID} # Runs as specified user/group
networks: # Persistent storage configuration
frontend: volumes:
external: true - ${APPDATA_PATH}/forgejo/config:/etc/gitea # Configuration files
- ${APPDATA_PATH}/forgejo/data:/var/lib/gitea # Application data
- /etc/timezone:/etc/timezone:ro # Timezone configuration
- /etc/localtime:/etc/localtime:ro # Local time configuration
# Network port configuration
ports:
- ${SERVER_PORT}:3000 # Maps host port to Forgejo web interface

View File

@@ -1,28 +0,0 @@
services:
gitea-mirror:
image: ghcr.io/raylabshq/gitea-mirror:v2.22.0
container_name: gitea-mirror
user: ${PUID}:${PGID}
environment:
- NODE_ENV=production
- DATABASE_URL=file:data/gitea-mirror.db
- HOST=0.0.0.0
- PORT=4321
- JWT_SECRET=${JWT_SECRET}
volumes:
- ${APPDATA_PATH}/gitea-mirror/data:/app/data
ports:
- ${PORT}:4321
networks:
- internal
restart: unless-stopped
healthcheck:
test: ["CMD", "wget", "--no-verbose", "--tries=3", "--spider", "http://localhost:4321/api/health"]
interval: 30s
timeout: 10s
retries: 5
start_period: 15s
networks:
internal:
external: true

View File

@@ -1,42 +1,61 @@
# Gitea Multiple Actions Runners Configuration
services: services:
# First Gitea Runner Instance
gitea_runner1: gitea_runner1:
image: docker.io/gitea/act_runner:0.2.12 # Basic container configuration
container_name: gitea_runner1 container_name: gitea_runner1
environment: image: docker.io/gitea/act_runner:0.2.12
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: "${INSTANCE_URL}"
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}"
GITEA_RUNNER_NAME: "${RUNNER_NAME1}"
volumes:
- ./config.yaml:/config.yaml
- ./data1:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped restart: unless-stopped
# Runner configuration environment variables
environment:
CONFIG_FILE: /config.yaml # Path to configuration file
GITEA_INSTANCE_URL: "${INSTANCE_URL}" # URL of Gitea instance
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}" # Registration token
GITEA_RUNNER_NAME: "${RUNNER_NAME1}" # Display name for first runner
# Persistent storage and docker socket configuration
volumes:
- ./config.yaml:/config.yaml # Shared configuration file
- ./data1:/data # Dedicated data directory for runner1
- /var/run/docker.sock:/var/run/docker.sock # Docker socket for container jobs
# Second Gitea Runner Instance
gitea_runner2: gitea_runner2:
image: docker.io/gitea/act_runner:0.2.12 # Basic container configuration
container_name: gitea_runner2 container_name: gitea_runner2
environment: image: docker.io/gitea/act_runner:0.2.12
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: "${INSTANCE_URL}"
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}"
GITEA_RUNNER_NAME: "${RUNNER_NAME2}"
volumes:
- ./config.yaml:/config.yaml
- ./data2:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped restart: unless-stopped
gitea_runner3: # Runner configuration environment variables
image: docker.io/gitea/act_runner:0.2.12
container_name: gitea_runner3
environment: environment:
CONFIG_FILE: /config.yaml CONFIG_FILE: /config.yaml # Path to configuration file
GITEA_INSTANCE_URL: "${INSTANCE_URL}" GITEA_INSTANCE_URL: "${INSTANCE_URL}" # URL of Gitea instance
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}" GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}" # Registration token
GITEA_RUNNER_NAME: "${RUNNER_NAME3}" GITEA_RUNNER_NAME: "${RUNNER_NAME2}" # Display name for second runner
# Persistent storage and docker socket configuration
volumes: volumes:
- ./config.yaml:/config.yaml - ./config.yaml:/config.yaml # Shared configuration file
- ./data3:/data - ./data2:/data # Dedicated data directory for runner2
- /var/run/docker.sock:/var/run/docker.sock - /var/run/docker.sock:/var/run/docker.sock # Docker socket for container jobs
# Third Gitea Runner Instance
gitea_runner3:
# Basic container configuration
container_name: gitea_runner3
image: docker.io/gitea/act_runner:0.2.12
restart: unless-stopped restart: unless-stopped
# Runner configuration environment variables
environment:
CONFIG_FILE: /config.yaml # Path to configuration file
GITEA_INSTANCE_URL: "${INSTANCE_URL}" # URL of Gitea instance
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}" # Registration token
GITEA_RUNNER_NAME: "${RUNNER_NAME3}" # Display name for third runner
# Persistent storage and docker socket configuration
volumes:
- ./config.yaml:/config.yaml # Shared configuration file
- ./data3:/data # Dedicated data directory for runner3
- /var/run/docker.sock:/var/run/docker.sock # Docker socket for container jobs

View File

@@ -1,14 +1,20 @@
# Gitea Actions Runner Configuration
services: services:
gitea_runner: gitea_runner:
image: docker.io/gitea/act_runner:0.2.12 # Basic container configuration
container_name: gitea_runner container_name: gitea_runner
environment: image: docker.io/gitea/act_runner:0.2.12
CONFIG_FILE: /config.yaml
GITEA_INSTANCE_URL: "${INSTANCE_URL}"
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}"
GITEA_RUNNER_NAME: "${RUNNER_NAME}"
volumes:
- ./config.yaml:/config.yaml
- ./data:/data
- /var/run/docker.sock:/var/run/docker.sock
restart: unless-stopped restart: unless-stopped
# Runner configuration environment variables
environment:
CONFIG_FILE: /config.yaml # Path to configuration file
GITEA_INSTANCE_URL: "${INSTANCE_URL}" # URL of Gitea instance
GITEA_RUNNER_REGISTRATION_TOKEN: "${REGISTRATION_TOKEN}" # Registration token
GITEA_RUNNER_NAME: "${RUNNER_NAME}" # Display name for runner
# Persistent storage and docker socket configuration
volumes:
- ./config.yaml:/config.yaml # Runner configuration file
- ./data:/data # Persistent runner data
- /var/run/docker.sock:/var/run/docker.sock # Docker socket for container jobs

View File

@@ -1,55 +0,0 @@
services:
gitea_db:
image: docker.io/library/mariadb:11.8.2
container_name: gitea_db
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb-file-per-table=1 --skip-innodb-read-only-compressed
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
volumes:
- ${APPDATA_PATH}/gitea/db:/var/lib/mysql
ports:
- ${DB_PORT}:3306
networks:
- backend
restart: unless-stopped
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
start_period: 10s
interval: 10s
timeout: 5s
retries: 3
gitea_server:
image: docker.io/gitea/gitea:1.24.2-rootless
container_name: gitea_server
depends_on:
gitea_db:
condition: service_healthy
user: ${PUID}:${PGID}
environment:
- GITEA__database__DB_TYPE=mysql
- GITEA__database__HOST=gitea_db:3306
- GITEA__database__NAME=${MYSQL_DATABASE}
- GITEA__database__USER=${MYSQL_USER}
- GITEA__database__PASSWD=${MYSQL_PASSWORD}
volumes:
- ${APPDATA_PATH}/gitea/config:/etc/gitea
- ${APPDATA_PATH}/gitea/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- ${SERVER_PORT}:3000
- ${SSH_PORT}:22
networks:
- frontend
- backend
restart: unless-stopped
networks:
frontend:
external: true
backend:
external: true

View File

@@ -1,53 +1,63 @@
# Gitea Git Service with PostgreSQL Database Configuration
services: services:
# PostgreSQL Database Service
gitea_db: gitea_db:
image: docker.io/library/postgres:17.5 # Basic container configuration
container_name: gitea_db container_name: gitea_db
image: docker.io/library/postgres:17.5
restart: unless-stopped
# Database credentials and configuration
environment: environment:
- POSTGRES_USER=${POSTGRES_USER} - POSTGRES_USER=${POSTGRES_USER} # Database admin username
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} - POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # Database admin password
- POSTGRES_DB=${POSTGRES_DB} - POSTGRES_DB=${POSTGRES_DB} # Database name for Gitea
# Persistent storage configuration
volumes: volumes:
- ${APPDATA_PATH}/gitea/db:/var/lib/postgresql/data - ${APPDATA_PATH}/gitea/db:/var/lib/postgresql/data
# Network port configuration
ports: ports:
- ${DB_PORT}:5432 - ${DB_PORT}:5432 # Maps host port to PostgreSQL
networks:
- backend # Health check configuration
restart: unless-stopped
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"] test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
start_period: 10s start_period: 10s # Initial delay before checks
interval: 10s interval: 10s # Check interval
timeout: 5s timeout: 5s # Check timeout
retries: 3 retries: 3 # Allowed retries
# Gitea Server Service
gitea_server: gitea_server:
image: docker.io/gitea/gitea:1.24.2-rootless # Basic container configuration
container_name: gitea_server container_name: gitea_server
image: docker.io/gitea/gitea:1.24.3-rootless
restart: unless-stopped
# Service dependencies
depends_on: depends_on:
gitea_db: gitea_db:
condition: service_healthy condition: service_healthy # Requires healthy database
user: ${PUID}:${PGID}
# Runtime configuration
user: ${PUID}:${PGID} # Runs as specified user/group
environment: environment:
- GITEA__database__DB_TYPE=postgres - GITEA__database__DB_TYPE=postgres # Database type
- GITEA__database__HOST=gitea_db:5432 - GITEA__database__HOST=gitea_db:5432 # Database host
- GITEA__database__NAME=${POSTGRES_DB} - GITEA__database__NAME=${POSTGRES_DB} # Database name
- GITEA__database__USER=${POSTGRES_USER} - GITEA__database__USER=${POSTGRES_USER} # Database username
- GITEA__database__PASSWD=${POSTGRES_PASSWORD} - GITEA__database__PASSWD=${POSTGRES_PASSWORD} # Database password
# Persistent storage configuration
volumes: volumes:
- ${APPDATA_PATH}/gitea/config:/etc/gitea - ${APPDATA_PATH}/gitea/config:/etc/gitea
- ${APPDATA_PATH}/gitea/data:/var/lib/gitea - ${APPDATA_PATH}/gitea/data:/var/lib/gitea
- /etc/timezone:/etc/timezone:ro - /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro - /etc/localtime:/etc/localtime:ro
ports:
- ${SERVER_PORT}:3000
- ${SSH_PORT}:22
networks:
- frontend
- backend
restart: unless-stopped
networks: # Network port configuration
frontend: ports:
external: true - ${SERVER_PORT}:3000 # Maps host port to Gitea web interface
backend: - ${SSH_PORT}:22 # Maps host port to Gitea SSH
external: true

View File

@@ -1,18 +1,20 @@
# Gotify Push Notification Server Configuration
services: services:
gotify: gotify:
image: ghcr.io/gotify/server:2.6.3 # Basic container configuration
container_name: gotify container_name: gotify
environment: image: ghcr.io/gotify/server:2.6.3
- TZ=${TZ}
- GOTIFY_REGISTRATION=${GOTIFY_REGISTRATION}
volumes:
- ${APPDATA_PATH}/gotify/config:/app/data
ports:
- ${PORT}:80
networks:
- frontend
restart: unless-stopped restart: unless-stopped
networks: # Application environment configuration
frontend: environment:
external: true - TZ=${TZ} # Timezone configuration
- GOTIFY_REGISTRATION=${GOTIFY_REGISTRATION} # Allow/disallow new user registration
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/gotify/config:/app/data # Configuration and database storage
# Network port configuration
ports:
- ${PORT}:80 # Maps host port to Gotify web interface

View File

@@ -1,39 +1,52 @@
# Healthchecks.io Monitoring Service Configuration
services: services:
healthchecks: healthchecks:
image: ghcr.io/linuxserver/healthchecks:3.10.20250705 # Basic container configuration
container_name: healthchecks container_name: healthchecks
environment: image: ghcr.io/linuxserver/healthchecks:3.10.20250714
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
- SITE_ROOT=${SITE_ROOT}
- SITE_NAME=${SITE_NAME}
- DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL}
- EMAIL_HOST=${EMAIL_HOST}
- EMAIL_PORT=${EMAIL_PORT}
- EMAIL_HOST_USER=${EMAIL_HOST_USER}
- EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD}
- EMAIL_USE_TLS=${EMAIL_USE_TLS}
- EMAIL_USE_SSL=${EMAIL_USE_SSL}
- SUPERUSER_EMAIL=${SUPERUSER_EMAIL}
- SUPERUSER_PASSWORD=${SUPERUSER_PASSWORD}
- SECRET_KEY=${SECRET_KEY}
- APPRISE_ENABLED=${APPRISE_ENABLED}
- REGISTRATION_OPEN=${REGISTRATION_OPEN}
- DEBUG=${DEBUG}
- SITE_LOGO_URL=${SITE_LOGO_URL}
- PING_EMAIL_DOMAIN=${PING_EMAIL_DOMAIN}
- DISCORD_CLIENT_ID=${DISCORD_CLIENT_ID}
- DISCORD_CLIENT_SECRET=${DISCORD_CLIENT_SECRET}
volumes:
- ${APPDATA_PATH}/healthchecks/config:/config
ports:
- ${PORT}:8000
- ${SMTP_PORT}:2525
networks:
- frontend
restart: unless-stopped restart: unless-stopped
networks: # User and system configuration
frontend: environment:
external: true # System settings
- PUID=${PUID} # User ID
- PGID=${PGID} # Group ID
- TZ=${TZ} # Timezone
# Site configuration
- SITE_ROOT=${SITE_ROOT} # Base URL
- SITE_NAME=${SITE_NAME} # Site display name
- SITE_LOGO_URL=${SITE_LOGO_URL} # Custom logo URL
# Email server configuration
- DEFAULT_FROM_EMAIL=${DEFAULT_FROM_EMAIL} # Sender address
- EMAIL_HOST=${EMAIL_HOST} # SMTP server
- EMAIL_PORT=${EMAIL_PORT} # SMTP port
- EMAIL_HOST_USER=${EMAIL_HOST_USER} # SMTP username
- EMAIL_HOST_PASSWORD=${EMAIL_HOST_PASSWORD} # SMTP password
- EMAIL_USE_TLS=${EMAIL_USE_TLS} # Enable TLS
- EMAIL_USE_SSL=${EMAIL_USE_SSL} # Enable SSL
- PING_EMAIL_DOMAIN=${PING_EMAIL_DOMAIN} # Email domain for pings
# Authentication and security
- SUPERUSER_EMAIL=${SUPERUSER_EMAIL} # Admin email
- SUPERUSER_PASSWORD=${SUPERUSER_PASSWORD} # Admin password
- SECRET_KEY=${SECRET_KEY} # Cryptographic secret
- REGISTRATION_OPEN=${REGISTRATION_OPEN} # Allow new registrations
# Integration settings
- APPRISE_ENABLED=${APPRISE_ENABLED} # Enable Apprise notifications
- DISCORD_CLIENT_ID=${DISCORD_CLIENT_ID} # Discord integration
- DISCORD_CLIENT_SECRET=${DISCORD_CLIENT_SECRET} # Discord secret
# Debugging
- DEBUG=${DEBUG} # Debug mode
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/healthchecks/config:/config # Configuration storage
# Network port configuration
ports:
- ${PORT}:8000 # Web interface port
- ${SMTP_PORT}:2525 # SMTP port for email

View File

@@ -1,16 +1,22 @@
# Home Assistant Smart Home Platform Configuration
services: services:
homeassistant: homeassistant:
image: docker.io/homeassistant/home-assistant:2025.7.1 # Basic container configuration
container_name: homeassistant container_name: homeassistant
volumes: image: docker.io/homeassistant/home-assistant:2025.7.2 # Official Home Assistant image
- ${APPDATA_PATH}/homeassistant/config:/config restart: unless-stopped # Auto-restart on failure
- /etc/localtime:/etc/localtime:ro
ports:
- ${PORT}:8123
networks:
- frontend
restart: unless-stopped
networks: # Network configuration (host mode for full local network access)
frontend: network_mode: host # Required for discovering local devices and integrations
external: true
# Persistent storage and system configuration
volumes:
- ${APPDATA_PATH}/homeassistant/config:/config # Configuration files
- /etc/localtime:/etc/localtime:ro # Sync host timezone for proper logging
# Health check configuration
healthcheck:
test: "curl --connect-timeout 10 --silent -f http://127.0.0.1:8123/ || exit 1"
interval: 45s
timeout: 30s
retries: 3

54
minio/docker-compose.yml Normal file
View File

@@ -0,0 +1,54 @@
# MinIO Object Storage Service Configuration
services:
# MinIO Server Service
minio:
# Basic container configuration
container_name: minio
image: docker.io/minio/minio:RELEASE.2025-07-18T21-56-31Z # Official MinIO image
restart: unless-stopped # Auto-restart on failure
# Runtime command and user permissions
command: server /data # Start in server mode
user: ${PUID}:${PGID} # Run as specified user/group
# Environment variables for authentication and configuration
environment:
- MINIO_ROOT_USER=${MINIO_ROOT_USER} # Admin username
- MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD} # Admin password
- MINIO_SERVER_URL=${MINIO_SERVER_URL} # Public server endpoint URL
- MINIO_BROWSER_REDIRECT_URL=${MINIO_BROWSER_REDIRECT_URL} # Web console redirect URL
# Persistent data storage
volumes:
- ${APPDATA_PATH}/minio/data:/data # Maps host directory to container
# Network ports
ports:
- ${API_PORT}:9000 # MinIO API port
# Health check configuration
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# MinIO Console Service (separate from main server)
minio-console:
# Basic container configuration
container_name: minio-console
image: ghcr.io/georgmangold/console:v1.8.1 # Community console UI
restart: unless-stopped # Auto-restart on failure
# Service dependencies
depends_on:
minio:
condition: service_healthy
# Environment configuration
environment:
- CONSOLE_MINIO_SERVER=http://minio:9000 # Points to MinIO server
# Network ports
ports:
- ${CONSOLE_PORT}:9090 # Web console access port

3
network.sh Normal file
View File

@@ -0,0 +1,3 @@
docker network create --subnet=172.30.10.0/24 --gateway=172.30.10.1 frontend
docker network create --subnet=172.30.20.0/24 --gateway=172.30.20.1 backend
docker network create --subnet=172.30.30.0/24 --gateway=172.30.30.1 internal

View File

@@ -0,0 +1,136 @@
# Nextcloud with PostgreSQL and Valkey Configuration
services:
# PostgreSQL Database Service
nextcloud_db:
# Basic container configuration
container_name: nextcloud_db
image: docker.io/library/postgres:17.5
restart: unless-stopped
# Database credentials
environment:
- POSTGRES_USER=${POSTGRES_USER} # Database username
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # Database password
- POSTGRES_DB=${POSTGRES_DB} # Database name
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/nextcloud/db:/var/lib/postgresql/data # Database files
# Network configuration
ports:
- ${DB_PORT}:5432 # PostgreSQL port
# Health monitoring
healthcheck:
test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
start_period: 10s # Initial delay
interval: 10s # Check frequency
timeout: 5s # Timeout duration
retries: 3 # Retry attempts
# Valkey (Redis-compatible) Cache Service
nextcloud_valkey:
# Basic container configuration
container_name: nextcloud_valkey
image: docker.io/valkey/valkey:8.1.3
restart: unless-stopped
# Runtime configuration
command: valkey-server --save 60 1 --requirepass ${VALKEY_PASSWORD} # Persistence and auth
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/nextcloud/valkey:/data # Valkey data
# Network configuration
ports:
- ${VALKEY_PORT}:6379 # Valkey port
# Health monitoring
healthcheck:
test: ["CMD-SHELL", "echo 'auth ${VALKEY_PASSWORD}\nping' | valkey-cli | grep PONG"]
start_period: 20s # Initial delay
interval: 30s # Check frequency
retries: 5 # Retry attempts
timeout: 3s # Timeout duration
# Nextcloud Application Service
nextcloud_app:
# Basic container configuration
image: docker.io/library/nextcloud:31.0.7
container_name: nextcloud_app
restart: unless-stopped
# Service dependencies
depends_on:
nextcloud_db:
condition: service_healthy # Requires healthy database
nextcloud_valkey:
condition: service_healthy # Requires healthy valkey
# Environment variables
environment:
# Database configuration
- POSTGRES_HOST=nextcloud_db:5432 # PostgreSQL host and port
- POSTGRES_DB=${POSTGRES_DB} # Database name
- POSTGRES_USER=${POSTGRES_USER} # Database username
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # Database password
# Redis/Valkey cache configuration
- REDIS_HOST=nextcloud_valkey # Valkey hostname
- REDIS_HOST_PORT=6379 # Valkey port
- REDIS_HOST_PASSWORD=${VALKEY_PASSWORD} # Valkey authentication
# PHP performance tuning
- PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT} # Memory allocation
- PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT} # Max upload size
# Security settings
- APACHE_DISABLE_REWRITE_IP=${APACHE_DISABLE_REWRITE_IP} # IP address handling
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/nextcloud/app:/var/www/html
# Network configuration
ports:
- ${APP_PORT}:80
# Nextcloud Cron Service
nextcloud_cron:
# Basic container configuration
image: docker.io/library/nextcloud:31.0.7
container_name: nextcloud_cron
restart: unless-stopped
# Service dependencies
depends_on:
- nextcloud_app
# Entry point
entrypoint: /cron.sh
# Environment variables
environment:
# Database configuration
- POSTGRES_HOST=nextcloud_db:5432 # PostgreSQL host and port
- POSTGRES_DB=${POSTGRES_DB} # Database name
- POSTGRES_USER=${POSTGRES_USER} # Database username
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # Database password
# Redis/Valkey cache configuration
- REDIS_HOST=nextcloud_valkey # Valkey hostname
- REDIS_HOST_PORT=6379 # Valkey port
- REDIS_HOST_PASSWORD=${VALKEY_PASSWORD} # Valkey authentication
# PHP performance tuning
- PHP_MEMORY_LIMIT=${PHP_MEMORY_LIMIT} # Memory allocation
- PHP_UPLOAD_LIMIT=${PHP_UPLOAD_LIMIT} # Max upload size
# Security settings
- APACHE_DISABLE_REWRITE_IP=${APACHE_DISABLE_REWRITE_IP} # IP address handling
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/nextcloud/app:/var/www/html

View File

@@ -1,58 +0,0 @@
services:
opengist_db:
image: docker.io/library/mariadb:11.8.2
container_name: opengist_db
command: --transaction-isolation=READ-COMMITTED --binlog-format=ROW --innodb-file-per-table=1 --skip-innodb-read-only-compressed
environment:
- MYSQL_ROOT_PASSWORD=${MYSQL_ROOT_PASSWORD}
- MYSQL_USER=${MYSQL_USER}
- MYSQL_PASSWORD=${MYSQL_PASSWORD}
- MYSQL_DATABASE=${MYSQL_DATABASE}
volumes:
- ${APPDATA_PATH}/opengist/db:/var/lib/mysql
ports:
- ${DB_PORT}:3306
networks:
- backend
restart: unless-stopped
healthcheck:
test: ["CMD", "healthcheck.sh", "--connect", "--innodb_initialized"]
start_period: 10s
interval: 10s
timeout: 5s
retries: 3
opengist_server:
image: ghcr.io/thomiceli/opengist:1.10.0
container_name: opengist_server
depends_on:
opengist_db:
condition: service_healthy
environment:
- UID=${UID}
- GID=${GID}
- OG_DB_URI=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@opengist_db:3306/${MYSQL_DATABASE}
- OG_EXTERNAL_URL=${OG_EXTERNAL_URL}
- OG_SECRET_KEY=${OG_SECRET_KEY}
- OG_HTTP_GIT_ENABLED=${OG_HTTP_GIT_ENABLED}
- OG_SSH_GIT_ENABLED=${OG_SSH_GIT_ENABLED}
- OG_GITEA_CLIENT_KEY=${OG_GITEA_CLIENT_KEY}
- OG_GITEA_SECRET=${OG_GITEA_SECRET}
- OG_GITEA_URL=${OG_GITEA_URL}
- OG_GITEA_NAME=${OG_GITEA_NAME}
- OG_CUSTOM_STATIC_LINK_0_NAME=${OG_CUSTOM_STATIC_LINK_0_NAME}
- OG_CUSTOM_STATIC_LINK_0_PATH=${OG_CUSTOM_STATIC_LINK_0_PATH}
volumes:
- ${APPDATA_PATH}/opengist/data:/opengist
ports:
- ${SERVER_PORT}:6157
networks:
- frontend
- backend
restart: unless-stopped
networks:
frontend:
external: true
backend:
external: true

View File

@@ -1,56 +1,78 @@
# OpenGist Git Snippet Service Configuration
services: services:
# PostgreSQL Database Service
opengist_db: opengist_db:
image: docker.io/library/postgres:17.5 # Basic container configuration
container_name: opengist_db container_name: opengist_db
environment: image: docker.io/library/postgres:17.5
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
volumes:
- ${APPDATA_PATH}/opengist/db:/var/lib/postgresql/data
ports:
- ${DB_PORT}:5432
networks:
- backend
restart: unless-stopped restart: unless-stopped
# Database credentials
environment:
- POSTGRES_USER=${POSTGRES_USER} # Database username
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD} # Database password
- POSTGRES_DB=${POSTGRES_DB} # Database name
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/opengist/db:/var/lib/postgresql/data # Database files
# Network configuration
ports:
- ${DB_PORT}:5432 # PostgreSQL port
# Health monitoring
healthcheck: healthcheck:
test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"] test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
start_period: 10s interval: 10s # Check frequency
interval: 10s timeout: 5s # Timeout duration
timeout: 5s retries: 3 # Retry attempts
retries: 3 start_period: 10s # Initial delay
# OpenGist Application Service
opengist_server: opengist_server:
image: ghcr.io/thomiceli/opengist:1.10.0 # Basic container configuration
container_name: opengist_server container_name: opengist_server
depends_on: image: ghcr.io/thomiceli/opengist:1.10.0
opengist_db:
condition: service_healthy
environment:
- UID=${UID}
- GID=${GID}
- OG_DB_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@opengist_db:5432/${POSTGRES_DB}
- OG_EXTERNAL_URL=${OG_EXTERNAL_URL}
- OG_SECRET_KEY=${OG_SECRET_KEY}
- OG_HTTP_GIT_ENABLED=${OG_HTTP_GIT_ENABLED}
- OG_SSH_GIT_ENABLED=${OG_SSH_GIT_ENABLED}
- OG_GITEA_CLIENT_KEY=${OG_GITEA_CLIENT_KEY}
- OG_GITEA_SECRET=${OG_GITEA_SECRET}
- OG_GITEA_URL=${OG_GITEA_URL}
- OG_GITEA_NAME=${OG_GITEA_NAME}
- OG_CUSTOM_STATIC_LINK_0_NAME=${OG_CUSTOM_STATIC_LINK_0_NAME}
- OG_CUSTOM_STATIC_LINK_0_PATH=${OG_CUSTOM_STATIC_LINK_0_PATH}
volumes:
- ${APPDATA_PATH}/opengist/data:/opengist
ports:
- ${SERVER_PORT}:6157
networks:
- frontend
- backend
restart: unless-stopped restart: unless-stopped
networks: # Service dependencies
frontend: depends_on:
external: true opengist_db:
backend: condition: service_healthy # Wait for healthy database
external: true
# Runtime configuration
environment:
# User and group IDs for file permissions
- UID=${UID} # User ID for file permissions
- GID=${GID} # Group ID for file permissions
# Database connection
- OG_DB_URI=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@opengist_db:5432/${POSTGRES_DB} # PostgreSQL connection string
# Application settings
- OG_EXTERNAL_URL=${OG_EXTERNAL_URL} # Public URL for OpenGist
- OG_SECRET_KEY=${OG_SECRET_KEY} # Encryption key for sessions
# Git protocol configuration
- OG_HTTP_GIT_ENABLED=${OG_HTTP_GIT_ENABLED} # Enable HTTP Git access
- OG_SSH_GIT_ENABLED=${OG_SSH_GIT_ENABLED} # Enable SSH Git access
# Gitea integration
- OG_GITEA_CLIENT_KEY=${OG_GITEA_CLIENT_KEY} # OAuth client key
- OG_GITEA_SECRET=${OG_GITEA_SECRET} # OAuth secret
- OG_GITEA_URL=${OG_GITEA_URL} # Gitea instance URL
- OG_GITEA_NAME=${OG_GITEA_NAME} # Gitea application name
# Customization
- OG_CUSTOM_STATIC_LINK_0_NAME=${OG_CUSTOM_STATIC_LINK_0_NAME} # Custom link name
- OG_CUSTOM_STATIC_LINK_0_PATH=${OG_CUSTOM_STATIC_LINK_0_PATH} # Custom link path
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/opengist/data:/opengist
# Network configuration
ports:
- ${SERVER_PORT}:6157 # Web interface port
- 2222:2222 # SSH port for Git operations

View File

@@ -0,0 +1,37 @@
# Palmr File Transfer Service Configuration
services:
palmr:
# Basic container configuration
container_name: palmr
image: docker.io/kyantech/palmr:v3.1.3-beta
restart: unless-stopped
# Application settings
environment:
# Storage Configuration
- ENABLE_S3=true # Set to 'true' to enable S3-compatible storage backend, 'false' for local filesystem
# S3 Configuration
- S3_ENDPOINT=${S3_ENDPOINT} # S3 endpoint (you have to set this to the s3 endpoint of the s3 server) CHANGE THIS TO YOUR S3 ENDPOINT
- S3_USE_SSL=true # Use ssl for the s3 server always true for s3
- S3_ACCESS_KEY=${S3_ACCESS_KEY} # S3 access key
- S3_SECRET_KEY=${S3_SECRET_KEY} # S3 secret key
- S3_REGION=us-east-1 # S3 region (us-east-1 is the default region) but it depends on your s3 server region
- S3_BUCKET_NAME=${S3_BUCKET_NAME} # Bucket name for the S3 storage
- S3_FORCE_PATH_STYLE=true # For MinIO compatibility we have to set this to true
# Security Settings
- ENCRYPTION_KEY=${ENCRYPTION_KEY} # Data encryption key for security
- SECURE_SITE=${SECURE_SITE} # Set to 'true' to enable HTTPS and security headers
# User/Group Permissions
- PALMR_UID=${PUID} # User ID under which the application runs
- PALMR_GID=${PGID} # Group ID under which the application runs
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/palmr/server:/app/server # Database directory
# Network configuration
ports:
- ${PORT}:5487 # Web interface port

View File

@@ -1,22 +1,28 @@
# Palmr File Transfer Service Configuration
services: services:
palmr: palmr:
image: docker.io/kyantech/palmr:v3.1.0-beta # Basic container configuration
container_name: palmr container_name: palmr
environment: image: docker.io/kyantech/palmr:v3.1.3-beta
- ENABLE_S3=${ENABLE_S3}
- ENCRYPTION_KEY=${ENCRYPTION_KEY}
- SECURE_SITE=${SECURE_SITE}
- PALMR_UID=${PUID}
- PALMR_GID=${PGID}
volumes:
- ${APPDATA_PATH}/palmr/db:/app/server/prisma
- ${DATA_PATH}/palmr/data:/app/server
ports:
- ${PORT}:5487
networks:
- frontend
restart: unless-stopped restart: unless-stopped
networks: # Application settings
frontend: environment:
external: true # Storage Configuration
- ENABLE_S3=false
# Security Settings
- ENCRYPTION_KEY=${ENCRYPTION_KEY} # Data encryption key for security
- SECURE_SITE=${SECURE_SITE} # Enable/Disable HTTPS security features
# User/Group Permissions
- PALMR_UID=${PUID} # User ID for proper file permissions
- PALMR_GID=${PGID} # Group ID for proper file permissions
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/palmr/server:/app/server # Database and File storage directory
# Network configuration
ports:
- ${PORT}:5487 # Web interface port

View File

@@ -1,37 +1,16 @@
# Portainer Agent Configuration
services: services:
portainer-agent: portainer-agent:
image: docker.io/portainer/agent:latest # Basic container configuration
container_name: portainer-agent container_name: portainer-agent
volumes: image: docker.io/portainer/agent:alpine
- /var/run/docker.sock:/var/run/docker.sock
- /var/lib/docker/volumes:/var/lib/docker/volumes
ports:
- 9001:9001
networks:
- frontend
- backend
- internal
restart: unless-stopped restart: unless-stopped
networks: # System access configuration
frontend: volumes:
name: frontend - /var/run/docker.sock:/var/run/docker.sock # Docker API access
driver: bridge - /var/lib/docker/volumes:/var/lib/docker/volumes # Volume management
ipam:
config: # Network port configuration
- subnet: 172.30.10.0/24 ports:
gateway: 172.30.10.1 - 9001:9001 # Agent communication port
backend:
name: backend
driver: bridge
ipam:
config:
- subnet: 172.30.20.0/24
gateway: 172.30.20.1
internal:
name: internal
driver: bridge
ipam:
config:
- subnet: 172.30.30.0/24
gateway: 172.30.30.1

View File

@@ -1,39 +1,26 @@
# Portainer Container Management Configuration
services: services:
portainer: portainer:
image: docker.io/portainer/portainer-ee:latest # Basic container configuration
container_name: portainer container_name: portainer
volumes: image: docker.io/portainer/portainer-ee:alpine
- ./data:/data
- /etc/localtime:/etc/localtime:ro
- /var/run/docker.sock:/var/run/docker.sock
ports:
- 8000:8000
- 9443:9443
networks:
- frontend
- backend
- internal
restart: unless-stopped restart: unless-stopped
networks: # Persistent storage and system access configuration
frontend: volumes:
name: frontend - ./data:/data # Portainer configuration and database
driver: bridge - /etc/localtime:/etc/localtime:ro # Sync host timezone
ipam: - /var/run/docker.sock:/var/run/docker.sock # Docker API access
config:
- subnet: 172.30.10.0/24 # Network ports configuration
gateway: 172.30.10.1 ports:
backend: - 8000:8000 # Edge agent communication port
name: backend - 9443:9443 # Web UI HTTPS port
driver: bridge
ipam: # Health check configuration
config: healthcheck:
- subnet: 172.30.20.0/24 test: "wget --no-verbose --tries=1 --spider http://localhost:9000/api/system/status || exit 1"
gateway: 172.30.20.1 start_period: 10s # Initial delay before checks
internal: interval: 10s # Check interval
name: internal timeout: 5s # Check timeout
driver: bridge retries: 3 # Allowed retries
ipam:
config:
- subnet: 172.30.30.0/24
gateway: 172.30.30.1

View File

@@ -1,36 +1,46 @@
# Radicale CalDAV/CardDAV Server Configuration
services: services:
radicale: radicale:
image: docker.io/tomsquest/docker-radicale:3.5.4.0 # Basic container configuration
container_name: radicale container_name: radicale
init: true image: docker.io/tomsquest/docker-radicale:3.5.4.0
read_only: true restart: unless-stopped
security_opt:
- no-new-privileges:true # Security hardening
cap_drop: init: true # Use init process for proper signal handling
- ALL read_only: true # Read-only filesystem for security
# Minimal required capabilities
cap_add: cap_add:
- SETUID - CHOWN # Required for file ownership changes
- SETGID - KILL # Required for process management
- CHOWN - SETGID # Required for group permissions
- KILL - SETUID # Required for user permissions
# Security restrictions
cap_drop:
- ALL # Drop all capabilities by default
security_opt:
- no-new-privileges:true # Prevent privilege escalation
# Resource limits
deploy: deploy:
resources: resources:
limits: limits:
memory: 256M memory: 256M # Memory limit
pids: 50 pids: 50 # Maximum number of processes
volumes:
- ${APPDATA_PATH}/radicale/data:/data
- ${APPDATA_PATH}/radicale/config:/config:ro
ports:
- ${PORT}:5232
networks:
- frontend
restart: unless-stopped
healthcheck:
test: curl -f http://127.0.0.1:5232 || exit 1
interval: 30s
retries: 3
networks: # Persistent storage configuration
frontend: volumes:
external: true - ${APPDATA_PATH}/radicale/data:/data # Calendar and contact data
- ${APPDATA_PATH}/radicale/config:/config:ro # Read-only configuration
# Network configuration
ports:
- ${PORT}:5232 # DAV service port
# Health monitoring
healthcheck:
test: curl -f http://127.0.0.1:5232 || exit 1 # Simple HTTP check
interval: 30s # Check every 30 seconds
retries: 3 # Allow 3 failures before marking unhealthy

View File

@@ -15,7 +15,13 @@
} }
], ],
"ignorePaths": [ "ignorePaths": [
"archived/" ".archived/"
],
"packageRules": [
{
"matchPackageNames": "docker.io/minio/minio",
"versioning": "regex:^RELEASE\\.(?<major>\\d{4})-(?<minor>\\d{2})-(?<patch>\\d{2})T\\d{2}-\\d{2}-\\d{2}Z$"
}
] ]
} }
} }

View File

@@ -1,24 +1,37 @@
# Stirling PDF Service Configuration
services: services:
stirling-pdf: stirling-pdf:
image: ghcr.io/stirling-tools/stirling-pdf:1.0.1-fat # Basic container configuration
container_name: stirling-pdf container_name: stirling-pdf
environment: image: ghcr.io/stirling-tools/stirling-pdf:1.0.2-fat # Full-featured image
- DISABLE_ADDITIONAL_FEATURES=${DISABLE_ADDITIONAL_FEATURES} restart: unless-stopped # Auto-recover from crashes
- DOCKER_ENABLE_SECURITY=${DOCKER_ENABLE_SECURITY}
- SECURITY_ENABLELOGIN=${SECURITY_ENABLELOGIN}
- LANGS=${LANGS}
volumes:
- ${APPDATA_DATA}/stirling-pdf/training_data:/usr/share/tessdata
- ${APPDATA_DATA}/stirling-pdf/config:/configs
- ${APPDATA_DATA}/stirling-pdf/custom_files:/customFiles/
- ${APPDATA_DATA}/stirling-pdf/logs:/logs/
- ${APPDATA_DATA}/stirling-pdf/pipeline:/pipeline/
ports:
- ${PORT}:8080
networks:
- frontend
restart: unless-stopped
networks: # Application settings
frontend: environment:
external: true - DISABLE_ADDITIONAL_FEATURES=${DISABLE_ADDITIONAL_FEATURES} # Toggle extra features
- DOCKER_ENABLE_SECURITY=${DOCKER_ENABLE_SECURITY} # Enable security restrictions
- SECURITY_ENABLELOGIN=${SECURITY_ENABLELOGIN} # Require authentication
- LANGS=${LANGS} # Supported languages for OCR
- SHOW_SURVEY=false # Disable user surveys
- DISABLE_PIXEL=true # Disable pixel tracking
- SYSTEM_ENABLEANALYTICS=false # Disable analytics
# Persistent storage configuration
volumes:
- ${APPDATA_DATA}/stirling-pdf/training_data:/usr/share/tessdata # OCR training data
- ${APPDATA_DATA}/stirling-pdf/config:/configs # Configuration files
- ${APPDATA_DATA}/stirling-pdf/custom_files:/customFiles/ # User uploads
- ${APPDATA_DATA}/stirling-pdf/logs:/logs/ # Application logs
- ${APPDATA_DATA}/stirling-pdf/pipeline:/pipeline/ # Processing pipelines
# Network configuration
ports:
- ${PORT}:8080 # Web interface port
# Health check configuration
healthcheck:
test: [ "CMD-SHELL", "curl -f http://localhost:8080/api/v1/info/status | grep -q 'UP'" ]
interval: 5s
timeout: 10s
retries: 5
start_period: 120s

View File

@@ -1,18 +1,27 @@
# SyncYomi Configuration - Manga/Comic Reader Sync Service
services: services:
syncyomi: syncyomi:
# Basic container configuration
container_name: syncyomi container_name: syncyomi
image: ghcr.io/syncyomi/syncyomi:v1.1.4 image: ghcr.io/syncyomi/syncyomi:v1.1.4
environment: restart: unless-stopped # Auto-restart on failure
- TZ=${TZ}
volumes:
- ${APPDATA_PATH}/syncyomi/config:/config
- ${APPDATA_PATH}/syncyomi/log:/log
ports:
- ${PORT}:8282
networks:
- frontend
restart: unless-stopped
networks: # Application settings
frontend: environment:
external: true - TZ=${TZ} # Timezone for proper timestamp handling
# Persistent storage configuration
volumes:
- ${APPDATA_PATH}/syncyomi/config:/config # Configuration files
- ${APPDATA_PATH}/syncyomi/log:/log # Application logs
# Network configuration
ports:
- ${PORT}:8282 # Web interface port
# Health check configuration
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8282"]
interval: 10s
timeout: 10s
retries: 3

View File

@@ -1,15 +1,15 @@
# Uptime Kuma Configuration - Status Monitoring Service
services: services:
uptime-kuma: uptime-kuma:
image: docker.io/louislam/uptime-kuma:1.23.16 # Basic container configuration
container_name: uptime-kuma container_name: uptime-kuma
volumes: image: docker.io/louislam/uptime-kuma:1.23.16
- ${APPDATA_PATH}/uptime-kuma/config:/app/data restart: unless-stopped # Auto-recover from crashes
ports:
- ${PORT}:3001
networks:
- frontend
restart: unless-stopped
networks: # Persistent storage configuration
frontend: volumes:
external: true - ${APPDATA_PATH}/uptime-kuma/config:/app/data # Monitoring configuration and data
# Network configuration
ports:
- ${PORT}:3001 # Web dashboard port

View File

@@ -1,52 +0,0 @@
services:
vaultwarden_db:
image: docker.io/library/postgres:17.5
container_name: vaultwarden_db
environment:
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
volumes:
- ${APPDATA_PATH}/vaultwarden/db:/var/lib/postgresql/data
ports:
- ${DB_PORT}:5432
networks:
- backend
restart: unless-stopped
healthcheck:
test: ["CMD-SHELL", "pg_isready -d ${POSTGRES_DB} -U ${POSTGRES_USER}"]
start_period: 20s
interval: 30s
retries: 5
timeout: 5s
vaultwarden_server:
image: ghcr.io/dani-garcia/vaultwarden:1.34.1
container_name: vaultwarden_server
depends_on:
vaultwarden_db:
condition: service_healthy
environment:
- PUID=${PUID}
- PGID=${PGID}
- TZ=${TZ}
- DATABASE_URL=postgresql://${POSTGRES_USER}:${POSTGRES_PASSWORD}@vaultwarden_db:5432/${POSTGRES_DB}
- DOMAIN=${DOMAIN}
- WEBSOCKET_ENABLED=${WEBSOCKET_ENABLED}
- SIGNUPS_ALLOWED=${SIGNUPS_ALLOWED}
- ADMIN_TOKEN=${ADMIN_TOKEN}
- LOG_FILE=/data/vaultwarden.log
volumes:
- ${APPDATA_PATH}/vaultwarden/data:/data/
ports:
- ${SERVER_PORT}:80
networks:
- frontend
- backend
restart: unless-stopped
networks:
frontend:
external: true
backend:
external: true