version: "3.7" services: db: deploy: # To use an external database, set replicas to 0 and set DATABASE_URL to the external database url in the .env file replicas: 1 image: postgres:16 shm_size: 1g restart: unless-stopped volumes: - db_data:/var/lib/postgresql/data expose: - 5432 ports: - 5432:5432 environment: POSTGRES_PASSWORD: ${DATABASE_PASSWORD} POSTGRES_DB: windmill healthcheck: test: ["CMD-SHELL", "pg_isready -U postgres"] interval: 10s timeout: 5s retries: 5 windmill_server: image: ${WM_IMAGE} pull_policy: always deploy: replicas: 1 restart: unless-stopped expose: - 8000 - 2525 environment: - DATABASE_URL=${DATABASE_URL} - MODE=server depends_on: db: condition: service_healthy volumes: - worker_logs:/tmp/windmill/logs windmill_worker: image: ${WM_IMAGE} pull_policy: always deploy: replicas: 3 resources: limits: cpus: "1" memory: 2048M restart: unless-stopped environment: - DATABASE_URL=${DATABASE_URL} - MODE=worker - WORKER_GROUP=default depends_on: db: condition: service_healthy # to mount the worker folder to debug, KEEP_JOB_DIR=true and mount /tmp/windmill volumes: # mount the docker socket to allow to run docker containers from within the workers - /var/run/docker.sock:/var/run/docker.sock - worker_dependency_cache:/tmp/windmill/cache - worker_logs:/tmp/windmill/logs ## This worker is specialized for "native" jobs. Native jobs run in-process and thus are much more lightweight than other jobs windmill_worker_native: # Use ghcr.io/windmill-labs/windmill-ee:main for the ee image: ${WM_IMAGE} pull_policy: always deploy: replicas: 1 resources: limits: cpus: "1" memory: 2048M restart: unless-stopped environment: - DATABASE_URL=${DATABASE_URL} - MODE=worker - WORKER_GROUP=native - NUM_WORKERS=8 - SLEEP_QUEUE=200 depends_on: db: condition: service_healthy volumes: - worker_logs:/tmp/windmill/logs ## This worker is specialized for reports or scraping jobs. It is assigned the "reports" worker group which has an init script that installs chromium and can be targeted by using the "chromium" worker tag. # windmill_worker_reports: # image: ${WM_IMAGE} # pull_policy: always # deploy: # replicas: 1 # resources: # limits: # cpus: "1" # memory: 2048M # restart: unless-stopped # environment: # - DATABASE_URL=${DATABASE_URL} # - MODE=worker # - WORKER_GROUP=reports # depends_on: # db: # condition: service_healthy # # to mount the worker folder to debug, KEEP_JOB_DIR=true and mount /tmp/windmill # volumes: # # mount the docker socket to allow to run docker containers from within the workers # - /var/run/docker.sock:/var/run/docker.sock # - worker_dependency_cache:/tmp/windmill/cache # The indexer powers full-text job and log search, an EE feature. 
  # The indexer powers full-text job and log search, an EE feature.
  windmill_indexer:
    image: ${WM_IMAGE}
    pull_policy: always
    deploy:
      replicas: 0 # set to 1 to enable full-text job and log search
    restart: unless-stopped
    expose:
      - 8001
    environment:
      - PORT=8001
      - DATABASE_URL=${DATABASE_URL}
      - MODE=indexer
      - TANTIVY_MAX_INDEXED_JOB_LOG_SIZE__MB=1 # job logs bigger than this will be truncated before indexing
      - TANTIVY_S3_BACKUP_PERIOD__S=3600 # how often to back up the index into object storage
      - TANTIVY_INDEX_WRITER_MEMORY_BUDGET__MB=100 # higher budget for higher indexing throughput
      - TANTIVY_REFRESH_INDEX_PERIOD__S=300 # how often to start indexing new jobs
      - TANTIVY_DOC_COMMIT_MAX_BATCH_SIZE=100000 # how many documents to batch in one commit
      - TANTIVY_SHOW_MEMORY_EVERY=10000 # log memory usage and progress every so many documents indexed
    depends_on:
      db:
        condition: service_healthy
    volumes:
      - windmill_index:/tmp/windmill/search

  lsp:
    image: ghcr.io/windmill-labs/windmill-lsp:latest
    pull_policy: always
    restart: unless-stopped
    expose:
      - 3001
    volumes:
      - lsp_cache:/root/.cache

  multiplayer:
    image: ghcr.io/windmill-labs/windmill-multiplayer:latest
    deploy:
      replicas: 0 # set to 1 to enable multiplayer, only available on Enterprise Edition
    restart: unless-stopped
    expose:
      - 3002

  caddy:
    image: ghcr.io/windmill-labs/caddy-l4:latest
    restart: unless-stopped
    # Configure the mounted Caddyfile and the exposed ports below, or use another reverse proxy if needed
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      # - ./certs:/certs # Provide custom certificate files like cert.pem and key.pem to enable HTTPS - see the corresponding section in the Caddyfile
    ports:
      # To change the exposed port, simply change the host side of the 5555:80 mapping below. No other changes needed
      - "5555:80"
      - 25:25
      # - 443:443 # Uncomment to enable HTTPS handling by Caddy
    environment:
      - BASE_URL=":80"
      # - BASE_URL=":443" # uncomment and comment the line above to enable HTTPS via custom certificate and key files
      # - BASE_URL=mydomain.com # uncomment and comment the line above to enable HTTPS handling by Caddy

volumes:
  db_data: null
  worker_dependency_cache: null
  worker_logs: null
  windmill_index: null
  lsp_cache: null
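# Illustrative .env sketch for the variables referenced above (values are assumptions and
# should be adapted to your deployment; the password must match POSTGRES_PASSWORD on the db service):
#   WM_IMAGE=ghcr.io/windmill-labs/windmill:main
#   DATABASE_PASSWORD=changeme
#   DATABASE_URL=postgres://postgres:changeme@db/windmill?sslmode=disable
# To use an external database instead, set the db service replicas to 0 and point DATABASE_URL at it.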