usda-vision/docker-compose.yml
salirezav 8cb45cbe03 Refactor Supabase services in docker-compose.yml for better organization and testing
- Commented out all Supabase services to facilitate testing with Supabase CLI.
- Updated README to include Supabase directory in project structure.
- Adjusted documentation for migration paths in Supabase Docker Compose guide.
- Enhanced docker-compose-reset.sh to explicitly remove Supabase volumes and wait for migrations to complete.
2025-12-18 18:27:04 -05:00

networks:
  usda-vision-network:
    driver: bridge
volumes:
  supabase-db:
    driver: local
  supabase-storage:
services:
  # ============================================================================
  # Supabase Services (Database & Backend) - COMMENTED OUT
  # ============================================================================
  # All Supabase services are grouped together and labeled under the "supabase"
  # namespace for better organization. By default they start together with the
  # rest of the stack. To manage the Supabase services separately, use:
  #   - Filter by label: docker compose ps --filter "label=com.usda-vision.service=supabase"
  #   - Or use the service names: docker compose ps supabase-*
  #
  # NOTE: Currently commented out to test the Supabase CLI setup from management-dashboard-web-app
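  #
  # For example (rough sketch; assumes the Supabase CLI project is set up in management-dashboard-web-app):
  #   (cd management-dashboard-web-app && supabase start)   # local Supabase stack via the CLI
  #   docker compose up -d                                   # remaining (non-Supabase) services in this file
  #   (cd management-dashboard-web-app && supabase stop)    # tear the CLI-managed stack back down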
  # # Supabase Database
  # supabase-db:
  #   container_name: usda-vision-supabase-db
  #   image: public.ecr.aws/supabase/postgres:17.4.1.068
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=database"
  #   healthcheck:
  #     test: ["CMD-SHELL", "pg_isready -U postgres -d postgres"]
  #     interval: 10s
  #     timeout: 5s
  #     retries: 5
  #     start_period: 30s
  #   volumes:
  #     - supabase-db:/var/lib/postgresql/data
  #   environment:
  #     POSTGRES_HOST: /var/run/postgresql
  #     PGPORT: 5432
  #     POSTGRES_PORT: 5432
  #     PGDATABASE: postgres
  #     POSTGRES_DB: postgres
  #     # Use postgres as the default user (Supabase image will create supabase_admin during init)
  #     PGUSER: postgres
  #     POSTGRES_USER: postgres
  #     PGPASSWORD: ${POSTGRES_PASSWORD:-postgres}
  #     POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
  #     # Supabase-specific environment variables
  #     JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
  #     JWT_EXP: ${JWT_EXP:-3600}
  #   ports:
  #     - "54322:5432"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
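  # # For example (sketch): when supabase-db is re-enabled, the database is reachable on the
  # # host through the 54322 port mapping above, e.g. with the default POSTGRES_PASSWORD:
  # #   psql postgresql://postgres:postgres@localhost:54322/postgres
  #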
  # # Supabase PostgREST API
  # supabase-rest:
  #   container_name: usda-vision-supabase-rest
  #   image: public.ecr.aws/supabase/postgrest:v12.2.12
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=api"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #     supabase-migrate:
  #       condition: service_completed_successfully
  #   environment:
  #     PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
  #     PGRST_DB_SCHEMAS: public,graphql_public
  #     PGRST_DB_EXTRA_SEARCH_PATH: public,extensions
  #     PGRST_DB_ANON_ROLE: anon
  #     PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
  #     PGRST_DB_USE_LEGACY_GUCS: "false"
  #     PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
  #     PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXP:-3600}
  #   ports:
  #     - "54321:3000"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Supabase GoTrue (Auth)
  # supabase-auth:
  #   container_name: usda-vision-supabase-auth
  #   image: public.ecr.aws/supabase/gotrue:v2.177.0
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=auth"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #   environment:
  #     GOTRUE_API_HOST: 0.0.0.0
  #     GOTRUE_API_PORT: 9999
  #     API_EXTERNAL_URL: http://localhost:54321
  #     GOTRUE_DB_DRIVER: postgres
  #     GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
  #     GOTRUE_SITE_URL: http://localhost:8080
  #     GOTRUE_URI_ALLOW_LIST: http://localhost:8080,http://localhost:3000,https://localhost:3000
  #     GOTRUE_DISABLE_SIGNUP: "false"
  #     GOTRUE_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
  #     GOTRUE_JWT_EXP: ${JWT_EXP:-3600}
  #     GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
  #     GOTRUE_EXTERNAL_EMAIL_ENABLED: "true"
  #     GOTRUE_MAILER_AUTOCONFIRM: "true"
  #     GOTRUE_SMS_AUTOCONFIRM: "true"
  #     GOTRUE_SMS_PROVIDER: twilio
  #     GOTRUE_ENABLE_SIGNUP: "true"
  #     GOTRUE_ENABLE_ANONYMOUS_SIGN_INS: "false"
  #     GOTRUE_ENABLE_MANUAL_LINKING: "false"
  #     GOTRUE_PASSWORD_MIN_LENGTH: 6
  #     GOTRUE_REFRESH_TOKEN_ROTATION_ENABLED: "true"
  #     GOTRUE_REFRESH_TOKEN_REUSE_INTERVAL: 10
  #   ports:
  #     - "9999:9999"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Supabase Realtime
  # supabase-realtime:
  #   container_name: usda-vision-supabase-realtime
  #   image: public.ecr.aws/supabase/realtime:v2.41.10
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=realtime"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #     supabase-rest:
  #       condition: service_started
  #   environment:
  #     PORT: 4000
  #     DB_HOST: supabase-db
  #     DB_PORT: 5432
  #     DB_USER: supabase_realtime_admin
  #     DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
  #     DB_NAME: postgres
  #     DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
  #     DB_ENC_KEY: supabaserealtime
  #     API_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
  #     FLY_ALLOC_ID: fly123
  #     FLY_APP_NAME: realtime
  #     SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
  #     ERL_AFLAGS: -proto_dist inet_tcp
  #     ENABLE_TAILSCALE: "false"
  #     DNS_NODES: "''"
  #   ports:
  #     - "4000:4000"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Supabase Storage
  # supabase-storage:
  #   container_name: usda-vision-supabase-storage
  #   image: public.ecr.aws/supabase/storage-api:v1.25.12
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=storage"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #     supabase-rest:
  #       condition: service_started
  #   environment:
  #     ANON_KEY: ${ANON_KEY:-[REDACTED]}
  #     SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
  #     POSTGREST_URL: http://supabase-rest:3000
  #     PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
  #     DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
  #     FILE_SIZE_LIMIT: 52428800
  #     STORAGE_BACKEND: file
  #     FILE_STORAGE_BACKEND_PATH: /var/lib/storage
  #     TENANT_ID: stub
  #     REGION: stub
  #     GLOBAL_S3_BUCKET: stub
  #     ENABLE_IMAGE_TRANSFORMATION: "false"
  #   volumes:
  #     - supabase-storage:/var/lib/storage
  #   ports:
  #     - "5000:5000"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Supabase Studio
  # supabase-studio:
  #   container_name: usda-vision-supabase-studio
  #   image: public.ecr.aws/supabase/studio:2025.07.28-sha-578b707
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=studio"
  #   depends_on:
  #     supabase-rest:
  #       condition: service_started
  #     supabase-auth:
  #       condition: service_started
  #   environment:
  #     STUDIO_PG_META_URL: http://supabase-meta:8080
  #     POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
  #     DEFAULT_ORGANIZATION_NAME: Default Organization
  #     DEFAULT_PROJECT_NAME: Default Project
  #     SUPABASE_URL: http://supabase-rest:3000
  #     SUPABASE_PUBLIC_URL: http://localhost:54321
  #     SUPABASE_ANON_KEY: ${ANON_KEY:-[REDACTED]}
  #     SUPABASE_SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
  #   ports:
  #     - "54323:3000"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Supabase Meta (for Studio)
  # supabase-meta:
  #   container_name: usda-vision-supabase-meta
  #   image: public.ecr.aws/supabase/postgres-meta:v0.91.3
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=meta"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #   environment:
  #     PG_META_PORT: 8080
  #     PG_META_DB_HOST: supabase-db
  #     PG_META_DB_PORT: 5432
  #     PG_META_DB_NAME: postgres
  #     PG_META_DB_USER: supabase_admin
  #     PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
  #   ports:
  #     - "54328:8080"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
  # # Migration Runner - runs migrations after database is ready
  # supabase-migrate:
  #   container_name: usda-vision-supabase-migrate
  #   image: postgres:17-alpine
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=migration"
  #   depends_on:
  #     supabase-db:
  #       condition: service_healthy
  #   environment:
  #     PGHOST: supabase-db
  #     PGPORT: 5432
  #     PGDATABASE: postgres
  #     PGUSER: postgres
  #     PGPASSWORD: ${POSTGRES_PASSWORD:-postgres}
  #   volumes:
  #     - ./supabase/migrations:/migrations:ro
  #     - ./supabase/seed_01_users.sql:/seed_01_users.sql:ro
  #     - ./supabase/seed_02_phase2_experiments.sql:/seed_02_phase2_experiments.sql:ro
  #   command: >
  #     sh -c "
  #       echo 'Waiting for database to be ready...';
  #       until pg_isready -h supabase-db -p 5432 -U postgres -d postgres; do
  #         sleep 2;
  #       done;
  #       echo 'Waiting for Supabase initialization to complete...';
  #       # Wait for Supabase schemas to be initialized (auth schema is a good indicator)
  #       until psql -h supabase-db -U postgres -d postgres -tc \"SELECT 1 FROM information_schema.schemata WHERE schema_name = 'auth'\" | grep -q 1; do
  #         echo ' Waiting for Supabase schemas...';
  #         sleep 3;
  #       done;
  #       echo 'Database and Supabase schemas are ready. Running migrations...';
  #       for migration in /migrations/*.sql; do
  #         if [ -f \"\$$migration\" ]; then
  #           echo \"Running migration: \$$(basename \$$migration)\";
  #           psql -h supabase-db -U postgres -d postgres -f \$$migration || echo \"Migration \$$(basename \$$migration) may have already been applied\";
  #         fi;
  #       done;
  #       echo 'Running seed files...';
  #       psql -h supabase-db -U postgres -d postgres -f /seed_01_users.sql || echo 'Seed 01 may have already been applied';
  #       psql -h supabase-db -U postgres -d postgres -f /seed_02_phase2_experiments.sql || echo 'Seed 02 may have already been applied';
  #       echo 'Migrations and seeds completed!';
  #     "
  #   networks:
  #     - usda-vision-network
  #   restart: "no"
  #
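  # # Example (sketch): if this service is re-enabled, migrations and seeds can be re-applied
  # # on demand with a one-off run of the migration container:
  # #   docker compose run --rm supabase-migrate
  #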
  # # Supabase Inbucket (Email Testing)
  # supabase-inbucket:
  #   container_name: usda-vision-supabase-inbucket
  #   image: inbucket/inbucket:stable
  #   labels:
  #     - "com.usda-vision.service=supabase"
  #     - "com.usda-vision.service.group=email"
  #   ports:
  #     - "54324:9000"
  #     - "54325:2500"
  #     - "54326:1100"
  #   networks:
  #     - usda-vision-network
  #   restart: unless-stopped
  #
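  # # For reference (assuming Inbucket's stock port layout): 54324 maps to the web UI (9000),
  # # 54325 to SMTP (2500), and 54326 to POP3 (1100), so captured test emails can be viewed at
  # #   http://localhost:54324
  #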
  api:
    container_name: usda-vision-api
    build:
      context: ./camera-management-api
      dockerfile: Dockerfile
    working_dir: /app
    restart: unless-stopped # Automatically restart container if it fails or exits
    healthcheck:
      test: ["CMD-SHELL", "python3 -c 'import urllib.request; urllib.request.urlopen(\"http://localhost:8000/health\").read()' || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    volumes:
      - ./camera-management-api:/app
      - /mnt/nfs_share:/mnt/nfs_share
      - /etc/localtime:/etc/localtime:ro
      - /etc/timezone:/etc/timezone:ro
    environment:
      - PYTHONUNBUFFERED=1
      - LD_LIBRARY_PATH=/usr/local/lib:/lib:/usr/lib
      - PYTHONPATH=/app:/app/camera_sdk
      - TZ=America/New_York
      - MEDIAMTX_HOST=localhost
      - MEDIAMTX_RTSP_PORT=8554
    command: >
      sh -lc "
        set -e  # Exit on error
        # Only install system packages if not already installed (check for ffmpeg)
        if ! command -v ffmpeg > /dev/null 2>&1; then
          echo 'Installing system dependencies...';
          apt-get update && apt-get install -y --no-install-recommends libusb-1.0-0-dev ffmpeg;
        else
          echo 'System dependencies already installed';
        fi
        # Install camera SDK if not already installed
        if [ ! -f /lib/libMVSDK.so ] && [ -f 'camera_sdk/linuxSDK_V2.1.0.49(250108)/install.sh' ]; then
          echo 'Installing camera SDK...';
          cd 'camera_sdk/linuxSDK_V2.1.0.49(250108)';
          chmod +x install.sh;
          ./install.sh || echo 'Warning: Camera SDK installation may have failed';
          cd /app;
        else
          echo 'Camera SDK already installed or install script not found';
        fi;
        # Install Python dependencies (only if requirements.txt changed or packages missing)
        if [ -f requirements.txt ]; then
          pip install --no-cache-dir -r requirements.txt || echo 'Warning: Some Python packages may have failed to install';
        else
          pip install --no-cache-dir -e . || echo 'Warning: Package installation may have failed';
        fi;
        # Start the application with error handling
        echo 'Starting USDA Vision Camera System...';
        python main.py --config config.compose.json || {
          echo 'Application exited with error code: $?';
          echo 'Waiting 5 seconds before exit...';
          sleep 5;
          exit 1;
        }
      "
    network_mode: host
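    # Quick check once the container is up (sketch; with network_mode: host the API listens
    # directly on the host, and the path below matches the healthcheck above):
    #   curl http://localhost:8000/health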
  web:
    container_name: usda-vision-web
    image: node:20-alpine
    working_dir: /app
    env_file:
      - ./management-dashboard-web-app/.env
    volumes:
      - ./management-dashboard-web-app:/app
    environment:
      - CHOKIDAR_USEPOLLING=true
      - TZ=America/New_York
      - VITE_SUPABASE_URL=http://localhost:54321
    command: >
      sh -lc "
        npm install;
        npm run dev -- --host 0.0.0.0 --port 8080
      "
    # Ensure the web container can resolve host.docker.internal on Linux
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "8080:8080"
    networks:
      - usda-vision-network
    # depends_on:
    #   supabase-rest:
    #     condition: service_started
    #   supabase-auth:
    #     condition: service_started
    # NOTE: Supabase dependencies commented out - using Supabase CLI instead
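    # For example (assuming the Supabase CLI's defaults): `supabase start` exposes the local API
    # gateway on port 54321, so VITE_SUPABASE_URL above resolves whether the backend comes from
    # the CLI or from the commented-out supabase-rest service (also mapped to 54321).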
  video-remote:
    container_name: usda-vision-video-remote
    image: node:20-alpine
    working_dir: /app
    environment:
      - CHOKIDAR_USEPOLLING=true
      - TZ=America/New_York
      - VITE_MEDIA_API_URL=http://exp-dash:8090
      - VITE_VISION_API_URL=http://exp-dash:8000
    volumes:
      - ./video-remote:/app
    command: >
      sh -lc "
        npm install;
        npm run dev:watch
      "
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "3001:3001"
    networks:
      - usda-vision-network
  vision-system-remote:
    container_name: usda-vision-vision-system-remote
    image: node:20-alpine
    working_dir: /app
    environment:
      - CHOKIDAR_USEPOLLING=true
      - TZ=America/New_York
      - VITE_VISION_API_URL=http://exp-dash:8000
    volumes:
      - ./vision-system-remote:/app
    command: >
      sh -lc "
        npm install;
        npm run dev:watch
      "
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "3002:3002"
    networks:
      - usda-vision-network
  scheduling-remote:
    container_name: usda-vision-scheduling-remote
    image: node:20-alpine
    working_dir: /app
    env_file:
      - ./management-dashboard-web-app/.env
    environment:
      - CHOKIDAR_USEPOLLING=true
      - TZ=America/New_York
    volumes:
      - ./scheduling-remote:/app
    command: >
      sh -lc "
        npm install;
        npm run dev:watch
      "
    extra_hosts:
      - "host.docker.internal:host-gateway"
    ports:
      - "3003:3003"
    networks:
      - usda-vision-network
  media-api:
    container_name: usda-vision-media-api
    build:
      context: ./media-api
      dockerfile: Dockerfile
    environment:
      - MEDIA_VIDEOS_DIR=/mnt/nfs_share
      - MEDIA_THUMBS_DIR=/mnt/nfs_share/.thumbnails
      - MAX_CONCURRENT_TRANSCODING=2 # Limit concurrent transcoding operations
    volumes:
      - /mnt/nfs_share:/mnt/nfs_share
    ports:
      - "8090:8090"
    networks:
      - usda-vision-network
    deploy:
      resources:
        limits:
          cpus: '4'    # Limit to 4 CPU cores (adjust based on your system)
          memory: 2G   # Limit to 2GB RAM per container
        reservations:
          cpus: '1'    # Reserve at least 1 CPU core
          memory: 512M # Reserve at least 512MB RAM
    # Alternative syntax for older Docker Compose versions:
    #   cpus: '4'
    #   mem_limit: 2g
    #   mem_reservation: 512m
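    # For example (sketch, using the container name defined above), the effective limits can be
    # checked at runtime with:
    #   docker stats usda-vision-media-api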
  mediamtx:
    container_name: usda-vision-mediamtx
    image: bluenviron/mediamtx:latest
    volumes:
      - ./mediamtx.yml:/mediamtx.yml:ro
      - /mnt/nfs_share:/mnt/nfs_share:ro
    ports:
      - "8554:8554" # RTSP
      - "8889:8889" # WebRTC HTTP API
      - "8189:8189" # WebRTC UDP
    networks:
      - usda-vision-network
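    # Rough smoke test (the stream path is hypothetical; actual paths are defined in mediamtx.yml):
    #   ffplay rtsp://localhost:8554/<stream-path>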