Refactor Supabase services in docker-compose.yml for better organization and testing

- Commented out all Supabase services to facilitate testing with Supabase CLI.
- Updated README to include Supabase directory in project structure.
- Adjusted documentation for migration paths in Supabase Docker Compose guide.
- Enhanced docker-compose-reset.sh to explicitly remove Supabase volumes and wait for migrations to complete.
salirezav
2025-12-18 18:27:04 -05:00
parent 93c68768d8
commit 8cb45cbe03
28 changed files with 7264 additions and 253 deletions


@@ -6,6 +6,7 @@ A unified monorepo combining the camera API service and the web dashboard for US
- `camera-management-api/` - Python API service for camera management (USDA-Vision-Cameras)
- `management-dashboard-web-app/` - React web dashboard for experiment management (pecan_experiments)
- `supabase/` - Database configuration, migrations, and seed data (shared infrastructure)
## Quick Start


@@ -8,250 +8,297 @@ volumes:
supabase-storage:
services:
# Supabase Database
supabase-db:
container_name: usda-vision-supabase-db
image: supabase/postgres:17.1.0.147
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
volumes:
- supabase-db:/var/lib/postgresql/data
environment:
POSTGRES_HOST: /var/run/postgresql
PGPORT: 5432
POSTGRES_PORT: 5432
PGDATABASE: postgres
POSTGRES_DB: postgres
PGUSER: supabase_admin
POSTGRES_USER: supabase_admin
PGPASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
JWT_EXP: ${JWT_EXP:-3600}
ports:
- "54322:5432"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase PostgREST API
supabase-rest:
container_name: usda-vision-supabase-rest
image: postgrest/postgrest:v12.2.0
depends_on:
supabase-db:
condition: service_healthy
supabase-migrate:
condition: service_completed_successfully
environment:
PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}@supabase-db:5432/postgres
PGRST_DB_SCHEMAS: public,graphql_public
PGRST_DB_EXTRA_SEARCH_PATH: public,extensions
PGRST_DB_ANON_ROLE: anon
PGRST_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
PGRST_DB_USE_LEGACY_GUCS: "false"
PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXP:-3600}
ports:
- "54321:3000"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase GoTrue (Auth)
supabase-auth:
container_name: usda-vision-supabase-auth
image: supabase/gotrue:v2.156.0
depends_on:
supabase-db:
condition: service_healthy
environment:
GOTRUE_API_HOST: 0.0.0.0
GOTRUE_API_PORT: 9999
API_EXTERNAL_URL: http://localhost:54321
GOTRUE_DB_DRIVER: postgres
GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}@supabase-db:5432/postgres
GOTRUE_SITE_URL: http://localhost:8080
GOTRUE_URI_ALLOW_LIST: http://localhost:8080,http://localhost:3000,https://localhost:3000
GOTRUE_DISABLE_SIGNUP: "false"
GOTRUE_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
GOTRUE_JWT_EXP: ${JWT_EXP:-3600}
GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
GOTRUE_EXTERNAL_EMAIL_ENABLED: "true"
GOTRUE_MAILER_AUTOCONFIRM: "true"
GOTRUE_SMS_AUTOCONFIRM: "true"
GOTRUE_SMS_PROVIDER: twilio
GOTRUE_ENABLE_SIGNUP: "true"
GOTRUE_ENABLE_ANONYMOUS_SIGN_INS: "false"
GOTRUE_ENABLE_MANUAL_LINKING: "false"
GOTRUE_PASSWORD_MIN_LENGTH: 6
GOTRUE_REFRESH_TOKEN_ROTATION_ENABLED: "true"
GOTRUE_REFRESH_TOKEN_REUSE_INTERVAL: 10
ports:
- "9999:9999"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase Realtime
supabase-realtime:
container_name: usda-vision-supabase-realtime
image: supabase/realtime:v2.30.25
depends_on:
supabase-db:
condition: service_healthy
supabase-rest:
condition: service_started
environment:
PORT: 4000
DB_HOST: supabase-db
DB_PORT: 5432
DB_USER: supabase_realtime_admin
DB_PASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
DB_NAME: postgres
DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
DB_ENC_KEY: supabaserealtime
API_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
FLY_ALLOC_ID: fly123
FLY_APP_NAME: realtime
SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
ERL_AFLAGS: -proto_dist inet_tcp
ENABLE_TAILSCALE: "false"
DNS_NODES: "''"
ports:
- "4000:4000"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase Storage
supabase-storage:
container_name: usda-vision-supabase-storage
image: supabase/storage-api:v1.11.8
depends_on:
supabase-db:
condition: service_healthy
supabase-rest:
condition: service_started
environment:
ANON_KEY: ${ANON_KEY:-[REDACTED]}
SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
POSTGREST_URL: http://supabase-rest:3000
PGRST_JWT_SECRET: ${JWT_SECRET:-your-super-secret-jwt-token-with-at-least-32-characters-long}
DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}@supabase-db:5432/postgres
FILE_SIZE_LIMIT: 52428800
STORAGE_BACKEND: file
FILE_STORAGE_BACKEND_PATH: /var/lib/storage
TENANT_ID: stub
REGION: stub
GLOBAL_S3_BUCKET: stub
ENABLE_IMAGE_TRANSFORMATION: "false"
volumes:
- supabase-storage:/var/lib/storage
ports:
- "5000:5000"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase Studio
supabase-studio:
container_name: usda-vision-supabase-studio
image: supabase/studio:20241218-5c0e5a0
depends_on:
supabase-rest:
condition: service_started
supabase-auth:
condition: service_started
environment:
STUDIO_PG_META_URL: http://supabase-meta:8080
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
DEFAULT_ORGANIZATION_NAME: Default Organization
DEFAULT_PROJECT_NAME: Default Project
SUPABASE_URL: http://supabase-rest:3000
SUPABASE_PUBLIC_URL: http://localhost:54321
SUPABASE_ANON_KEY: ${ANON_KEY:-[REDACTED]}
SUPABASE_SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
ports:
- "54323:3000"
networks:
- usda-vision-network
restart: unless-stopped
# Supabase Meta (for Studio)
supabase-meta:
container_name: usda-vision-supabase-meta
image: supabase/postgres-meta:v0.88.0
depends_on:
supabase-db:
condition: service_healthy
environment:
PG_META_PORT: 8080
PG_META_DB_HOST: supabase-db
PG_META_DB_PORT: 5432
PG_META_DB_NAME: postgres
PG_META_DB_USER: supabase_admin
PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
ports:
- "54328:8080"
networks:
- usda-vision-network
restart: unless-stopped
# Migration Runner - runs migrations after database is ready
supabase-migrate:
container_name: usda-vision-supabase-migrate
image: postgres:17-alpine
depends_on:
supabase-db:
condition: service_healthy
environment:
PGHOST: supabase-db
PGPORT: 5432
PGDATABASE: postgres
PGUSER: supabase_admin
PGPASSWORD: ${POSTGRES_PASSWORD:-your-super-secret-and-long-postgres-password}
volumes:
- ./management-dashboard-web-app/supabase/migrations:/migrations:ro
- ./management-dashboard-web-app/supabase/seed_01_users.sql:/seed_01_users.sql:ro
- ./management-dashboard-web-app/supabase/seed_02_phase2_experiments.sql:/seed_02_phase2_experiments.sql:ro
command: >
sh -c "
echo 'Waiting for database to be ready...';
until pg_isready -h supabase-db -p 5432 -U supabase_admin; do
sleep 2;
done;
echo 'Database is ready. Running migrations...';
for migration in /migrations/*.sql; do
if [ -f \"\$$migration\" ]; then
echo \"Running migration: \$$(basename \$$migration)\";
psql -h supabase-db -U supabase_admin -d postgres -f \$$migration || echo \"Migration \$$(basename \$$migration) may have already been applied\";
fi;
done;
echo 'Running seed files...';
psql -h supabase-db -U supabase_admin -d postgres -f /seed_01_users.sql || echo 'Seed 01 may have already been applied';
psql -h supabase-db -U supabase_admin -d postgres -f /seed_02_phase2_experiments.sql || echo 'Seed 02 may have already been applied';
echo 'Migrations and seeds completed!';
"
networks:
- usda-vision-network
restart: "no"
# Supabase Inbucket (Email Testing)
supabase-inbucket:
container_name: usda-vision-supabase-inbucket
image: inbucket/inbucket:stable
ports:
- "54324:9000"
- "54325:2500"
- "54326:1100"
networks:
- usda-vision-network
restart: unless-stopped
# ============================================================================
# Supabase Services (Database & Backend) - COMMENTED OUT
# ============================================================================
# All Supabase services are grouped together and labeled with "supabase"
# namespace for better organization. They start by default with all services.
# To manage Supabase services separately, use:
# - Filter by label: docker compose ps --filter "label=com.usda-vision.service=supabase"
# - Or select by name: docker compose ps $(docker compose config --services | grep '^supabase-')
#
# NOTE: Currently commented out to test Supabase CLI setup from management-dashboard-web-app
# # Supabase Database
# supabase-db:
# container_name: usda-vision-supabase-db
# image: public.ecr.aws/supabase/postgres:17.4.1.068
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=database"
# healthcheck:
# test: ["CMD-SHELL", "pg_isready -U postgres -d postgres"]
# interval: 10s
# timeout: 5s
# retries: 5
# start_period: 30s
# volumes:
# - supabase-db:/var/lib/postgresql/data
# environment:
# POSTGRES_HOST: /var/run/postgresql
# PGPORT: 5432
# POSTGRES_PORT: 5432
# PGDATABASE: postgres
# POSTGRES_DB: postgres
# # Use postgres as the default user (Supabase image will create supabase_admin during init)
# PGUSER: postgres
# POSTGRES_USER: postgres
# PGPASSWORD: ${POSTGRES_PASSWORD:-postgres}
# POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
# # Supabase-specific environment variables
# JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# JWT_EXP: ${JWT_EXP:-3600}
# ports:
# - "54322:5432"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase PostgREST API
# supabase-rest:
# container_name: usda-vision-supabase-rest
# image: public.ecr.aws/supabase/postgrest:v12.2.12
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=api"
# depends_on:
# supabase-db:
# condition: service_healthy
# supabase-migrate:
# condition: service_completed_successfully
# environment:
# PGRST_DB_URI: postgres://authenticator:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
# PGRST_DB_SCHEMAS: public,graphql_public
# PGRST_DB_EXTRA_SEARCH_PATH: public,extensions
# PGRST_DB_ANON_ROLE: anon
# PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# PGRST_DB_USE_LEGACY_GUCS: "false"
# PGRST_APP_SETTINGS_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# PGRST_APP_SETTINGS_JWT_EXP: ${JWT_EXP:-3600}
# ports:
# - "54321:3000"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase GoTrue (Auth)
# supabase-auth:
# container_name: usda-vision-supabase-auth
# image: public.ecr.aws/supabase/gotrue:v2.177.0
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=auth"
# depends_on:
# supabase-db:
# condition: service_healthy
# environment:
# GOTRUE_API_HOST: 0.0.0.0
# GOTRUE_API_PORT: 9999
# API_EXTERNAL_URL: http://localhost:54321
# GOTRUE_DB_DRIVER: postgres
# GOTRUE_DB_DATABASE_URL: postgres://supabase_auth_admin:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
# GOTRUE_SITE_URL: http://localhost:8080
# GOTRUE_URI_ALLOW_LIST: http://localhost:8080,http://localhost:3000,https://localhost:3000
# GOTRUE_DISABLE_SIGNUP: "false"
# GOTRUE_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# GOTRUE_JWT_EXP: ${JWT_EXP:-3600}
# GOTRUE_JWT_DEFAULT_GROUP_NAME: authenticated
# GOTRUE_EXTERNAL_EMAIL_ENABLED: "true"
# GOTRUE_MAILER_AUTOCONFIRM: "true"
# GOTRUE_SMS_AUTOCONFIRM: "true"
# GOTRUE_SMS_PROVIDER: twilio
# GOTRUE_ENABLE_SIGNUP: "true"
# GOTRUE_ENABLE_ANONYMOUS_SIGN_INS: "false"
# GOTRUE_ENABLE_MANUAL_LINKING: "false"
# GOTRUE_PASSWORD_MIN_LENGTH: 6
# GOTRUE_REFRESH_TOKEN_ROTATION_ENABLED: "true"
# GOTRUE_REFRESH_TOKEN_REUSE_INTERVAL: 10
# ports:
# - "9999:9999"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase Realtime
# supabase-realtime:
# container_name: usda-vision-supabase-realtime
# image: public.ecr.aws/supabase/realtime:v2.41.10
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=realtime"
# depends_on:
# supabase-db:
# condition: service_healthy
# supabase-rest:
# condition: service_started
# environment:
# PORT: 4000
# DB_HOST: supabase-db
# DB_PORT: 5432
# DB_USER: supabase_realtime_admin
# DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
# DB_NAME: postgres
# DB_AFTER_CONNECT_QUERY: 'SET search_path TO _realtime'
# DB_ENC_KEY: supabaserealtime
# API_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# FLY_ALLOC_ID: fly123
# FLY_APP_NAME: realtime
# SECRET_KEY_BASE: UpNVntn3cDxHJpq99YMc1T1AQgQpc8kfYTuRgBiYa15BLrx8etQoXz3gZv1/u2oq
# ERL_AFLAGS: -proto_dist inet_tcp
# ENABLE_TAILSCALE: "false"
# DNS_NODES: "''"
# ports:
# - "4000:4000"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase Storage
# supabase-storage:
# container_name: usda-vision-supabase-storage
# image: public.ecr.aws/supabase/storage-api:v1.25.12
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=storage"
# depends_on:
# supabase-db:
# condition: service_healthy
# supabase-rest:
# condition: service_started
# environment:
# ANON_KEY: ${ANON_KEY:-[REDACTED]}
# SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
# POSTGREST_URL: http://supabase-rest:3000
# PGRST_JWT_SECRET: ${JWT_SECRET:-super-secret-jwt-token-with-at-least-32-characters-long}
# DATABASE_URL: postgres://supabase_storage_admin:${POSTGRES_PASSWORD:-postgres}@supabase-db:5432/postgres
# FILE_SIZE_LIMIT: 52428800
# STORAGE_BACKEND: file
# FILE_STORAGE_BACKEND_PATH: /var/lib/storage
# TENANT_ID: stub
# REGION: stub
# GLOBAL_S3_BUCKET: stub
# ENABLE_IMAGE_TRANSFORMATION: "false"
# volumes:
# - supabase-storage:/var/lib/storage
# ports:
# - "5000:5000"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase Studio
# supabase-studio:
# container_name: usda-vision-supabase-studio
# image: public.ecr.aws/supabase/studio:2025.07.28-sha-578b707
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=studio"
# depends_on:
# supabase-rest:
# condition: service_started
# supabase-auth:
# condition: service_started
# environment:
# STUDIO_PG_META_URL: http://supabase-meta:8080
# POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
# DEFAULT_ORGANIZATION_NAME: Default Organization
# DEFAULT_PROJECT_NAME: Default Project
# SUPABASE_URL: http://supabase-rest:3000
# SUPABASE_PUBLIC_URL: http://localhost:54321
# SUPABASE_ANON_KEY: ${ANON_KEY:-[REDACTED]}
# SUPABASE_SERVICE_KEY: ${SERVICE_KEY:-eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZS1kZW1vIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImV4cCI6MTk4MzgxMjk5Nn0.EGIM96RAZx35lJzdJsyH-qQwv8Hdp7fsn3W0YpN81IU}
# ports:
# - "54323:3000"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Supabase Meta (for Studio)
# supabase-meta:
# container_name: usda-vision-supabase-meta
# image: public.ecr.aws/supabase/postgres-meta:v0.91.3
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=meta"
# depends_on:
# supabase-db:
# condition: service_healthy
# environment:
# PG_META_PORT: 8080
# PG_META_DB_HOST: supabase-db
# PG_META_DB_PORT: 5432
# PG_META_DB_NAME: postgres
# PG_META_DB_USER: supabase_admin
# PG_META_DB_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
# ports:
# - "54328:8080"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
# # Migration Runner - runs migrations after database is ready
# supabase-migrate:
# container_name: usda-vision-supabase-migrate
# image: postgres:17-alpine
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=migration"
# depends_on:
# supabase-db:
# condition: service_healthy
# environment:
# PGHOST: supabase-db
# PGPORT: 5432
# PGDATABASE: postgres
# PGUSER: postgres
# PGPASSWORD: ${POSTGRES_PASSWORD:-postgres}
# volumes:
# - ./supabase/migrations:/migrations:ro
# - ./supabase/seed_01_users.sql:/seed_01_users.sql:ro
# - ./supabase/seed_02_phase2_experiments.sql:/seed_02_phase2_experiments.sql:ro
# command: >
# sh -c "
# echo 'Waiting for database to be ready...';
# until pg_isready -h supabase-db -p 5432 -U postgres -d postgres; do
# sleep 2;
# done;
# echo 'Waiting for Supabase initialization to complete...';
# # Wait for Supabase schemas to be initialized (auth schema is a good indicator)
# until psql -h supabase-db -U postgres -d postgres -tc \"SELECT 1 FROM information_schema.schemata WHERE schema_name = 'auth'\" | grep -q 1; do
# echo ' Waiting for Supabase schemas...';
# sleep 3;
# done;
# echo 'Database and Supabase schemas are ready. Running migrations...';
# for migration in /migrations/*.sql; do
# if [ -f \"\$$migration\" ]; then
# echo \"Running migration: \$$(basename \$$migration)\";
# psql -h supabase-db -U postgres -d postgres -f \$$migration || echo \"Migration \$$(basename \$$migration) may have already been applied\";
# fi;
# done;
# echo 'Running seed files...';
# psql -h supabase-db -U postgres -d postgres -f /seed_01_users.sql || echo 'Seed 01 may have already been applied';
# psql -h supabase-db -U postgres -d postgres -f /seed_02_phase2_experiments.sql || echo 'Seed 02 may have already been applied';
# echo 'Migrations and seeds completed!';
# "
# networks:
# - usda-vision-network
# restart: "no"
#
# # Supabase Inbucket (Email Testing)
# supabase-inbucket:
# container_name: usda-vision-supabase-inbucket
# image: inbucket/inbucket:stable
# labels:
# - "com.usda-vision.service=supabase"
# - "com.usda-vision.service.group=email"
# ports:
# - "54324:9000"
# - "54325:2500"
# - "54326:1100"
# networks:
# - usda-vision-network
# restart: unless-stopped
#
api:
container_name: usda-vision-api
build:
@@ -342,11 +389,12 @@ services:
- "8080:8080"
networks:
- usda-vision-network
depends_on:
supabase-rest:
condition: service_started
supabase-auth:
condition: service_started
# depends_on:
# supabase-rest:
# condition: service_started
# supabase-auth:
# condition: service_started
# NOTE: Supabase dependencies commented out - using Supabase CLI instead
video-remote:
container_name: usda-vision-video-remote


@@ -0,0 +1,104 @@
# Docker Compose Service Organization
## Service Namespaces
Services in the docker-compose.yml are organized using labels to create logical namespaces. This allows for better organization and easier management of related services.
## Supabase Namespace
All Supabase services are grouped under the `com.usda-vision.service=supabase` label namespace:
### Services in Supabase Namespace
- **supabase-db** - PostgreSQL database (group: `database`)
- **supabase-rest** - PostgREST API (group: `api`)
- **supabase-auth** - GoTrue authentication (group: `auth`)
- **supabase-realtime** - Realtime subscriptions (group: `realtime`)
- **supabase-storage** - Storage API (group: `storage`)
- **supabase-studio** - Supabase Studio UI (group: `studio`)
- **supabase-meta** - Database metadata service (group: `meta`)
- **supabase-migrate** - Migration runner (group: `migration`)
- **supabase-inbucket** - Email testing server (group: `email`)
### Managing Supabase Services
#### List all Supabase services:
```bash
docker compose ps --filter "label=com.usda-vision.service=supabase"
```
Note that `docker compose` does not expand `supabase-*` wildcards itself, so the commands below use a shell substitution to select every service whose name starts with `supabase-`.
#### View logs for all Supabase services:
```bash
docker compose logs $(docker compose config --services | grep '^supabase-')
```
#### Stop all Supabase services:
```bash
docker compose stop $(docker compose config --services | grep '^supabase-')
```
#### Start all Supabase services:
```bash
docker compose start $(docker compose config --services | grep '^supabase-')
```
#### Restart all Supabase services:
```bash
docker compose restart $(docker compose config --services | grep '^supabase-')
```
#### View logs for a specific Supabase service group:
```bash
# Database services
docker compose logs supabase-db supabase-migrate
# API services
docker compose logs supabase-rest supabase-auth
# UI services
docker compose logs supabase-studio supabase-meta
```
### Label Structure
Each Supabase service has two labels:
- `com.usda-vision.service=supabase` - Identifies it as part of the Supabase namespace
- `com.usda-vision.service.group=<group>` - Identifies the service's functional group
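The group label can also be combined with plain `docker ps` to inspect one functional group at a time. A minimal sketch, assuming the labels above are applied:
```bash
# List only the Supabase database-group containers via the group label
docker ps --filter "label=com.usda-vision.service.group=database"

# Combine both labels to scope the listing to the Supabase API containers
docker ps \
  --filter "label=com.usda-vision.service=supabase" \
  --filter "label=com.usda-vision.service.group=api"
```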
### Benefits
1. **Better Organization**: All Supabase services are visually grouped in the compose file
2. **Easy Filtering**: Use labels to filter and manage related services
3. **Clear Ownership**: Makes it obvious which services belong together
4. **No Behavior Change**: Services still start by default with `docker compose up`
5. **Flexible Management**: Can manage Supabase services separately when needed
## Future Namespaces
You can extend this pattern to other service groups:
```yaml
services:
api:
labels:
- "com.usda-vision.service=application"
- "com.usda-vision.service.group=api"
web:
labels:
- "com.usda-vision.service=application"
- "com.usda-vision.service.group=frontend"
```
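Once those labels exist, the same filtering works for the application group (a sketch, assuming the labels above are applied):
```bash
docker ps --filter "label=com.usda-vision.service=application"
```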
## Visual Organization in docker-compose.yml
The Supabase services are also visually organized with a comment section:
```yaml
# ============================================================================
# Supabase Services (Database & Backend)
# ============================================================================
```
This makes it easy to find and understand the Supabase services section when viewing the file.


@@ -0,0 +1,141 @@
# Fixing Docker Hub Rate Limit Issues
## Problem
When pulling multiple Docker images, you may encounter:
```
Error response from daemon: toomanyrequests: Rate exceeded
```
This happens because Docker Hub rate-limits image pulls, with the strictest limits applying to anonymous (not logged-in) users.
## Solutions
### Solution 1: Login to Docker Hub (Recommended)
Logging in to Docker Hub increases your rate limit significantly:
```bash
docker login
```
Enter your Docker Hub username and password. This increases your rate limit from ~100 pulls per 6 hours to ~200 pulls per 6 hours.
### Solution 2: Pull Images One at a Time
Use the provided script to pull images with delays:
```bash
./scripts/pull-supabase-images.sh
```
This script:
- Pulls images one at a time
- Waits 10 seconds between pulls
- Retries on rate limit errors
- Shows progress and summary
### Solution 3: Manual Pull with Delays
Pull images manually with delays:
```bash
# Pull one image at a time with delays
docker compose pull supabase-db
sleep 10
docker compose pull supabase-rest
sleep 10
docker compose pull supabase-auth
sleep 10
# ... continue for other services
```
### Solution 4: Wait and Retry
If you hit the rate limit:
1. **Wait** for earlier pulls to age out of the rolling 6-hour window (a few minutes is often enough)
2. **Try again**:
```bash
docker compose pull $(docker compose config --services | grep '^supabase-')
```
### Solution 5: Use Cached Images
Check if images are already available locally:
```bash
# Check what Supabase images you have
docker images | grep supabase
# If images exist, you can start services without pulling
docker compose up -d $(docker compose config --services | grep '^supabase-')
```
## Current Rate Limits
- **Anonymous users**: ~100 pulls per 6 hours per IP
- **Authenticated users**: ~200 pulls per 6 hours per account
- **Paid plans**: Higher limits
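Docker Hub also reports your standing via `ratelimit-limit` and `ratelimit-remaining` headers on manifest requests. A quick check, sketched with Docker's documented `ratelimitpreview/test` image and assuming `curl` and `jq` are installed:
```bash
# Fetch an anonymous pull token, then read the rate-limit headers
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:ratelimitpreview/test:pull" | jq -r .token)
curl -sI -H "Authorization: Bearer $TOKEN" \
  "https://registry-1.docker.io/v2/ratelimitpreview/test/manifests/latest" \
  | grep -i '^ratelimit'
```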
## Quick Fix for Supabase Services
If you just need to start Supabase services and have some images cached:
```bash
# 1. Check what's already available
docker images | grep -E "(supabase|postgres|inbucket)"
# 2. Try starting services (will only pull missing images)
docker compose up -d supabase-db supabase-rest supabase-auth supabase-realtime supabase-storage supabase-studio supabase-meta supabase-migrate supabase-inbucket
# 3. If rate limited, wait 5 minutes and try pulling specific images:
docker compose pull supabase-rest
# Wait 10 seconds
docker compose pull supabase-auth
# Continue...
```
## Alternative: Use AWS ECR Directly
Since Supabase images are on AWS ECR (`public.ecr.aws/supabase/`), you can pull directly:
```bash
# Pull directly from ECR (may have different rate limits)
docker pull public.ecr.aws/supabase/postgrest:v12.2.12
docker pull public.ecr.aws/supabase/gotrue:v2.177.0
# ... etc
```
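Public ECR enforces its own, separate limits and allows anonymous pulls. One common gotcha: a stale cached login for `public.ecr.aws` can cause authentication errors on pulls; clearing it usually helps (a sketch):
```bash
# Drop any expired cached credentials for public ECR, then retry anonymously
docker logout public.ecr.aws
docker pull public.ecr.aws/supabase/postgrest:v12.2.12
```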
## Prevention
1. **Always login**: `docker login` before pulling many images
2. **Use local images**: Keep images locally when possible
3. **Pull gradually**: Don't pull all images at once
4. **Use image caching**: Docker caches layers, so subsequent pulls are faster
## Verify Images Are Available
After pulling, verify:
```bash
# List all Supabase-related images
docker images | grep -E "(supabase|postgrest|gotrue|realtime|storage|studio|postgres-meta|inbucket)"
# Check specific service images
docker images public.ecr.aws/supabase/postgrest
docker images public.ecr.aws/supabase/gotrue
```
## Start Services After Pulling
Once images are pulled:
```bash
# Start all Supabase services
docker compose up -d supabase-db supabase-rest supabase-auth supabase-realtime supabase-storage supabase-studio supabase-meta supabase-migrate supabase-inbucket
# Or start everything
docker compose up -d
```


@@ -0,0 +1,258 @@
# Running Supabase Services with Docker Compose
## Quick Start
### Start All Supabase Services
```bash
# From project root
docker compose up -d supabase-db supabase-rest supabase-auth supabase-realtime supabase-storage supabase-studio supabase-meta supabase-migrate supabase-inbucket
```
Or use the shorthand (all services starting with `supabase-`):
```bash
docker compose up -d $(docker compose config --services | grep supabase | tr '\n' ' ')
```
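If you use this pattern often, a small shell helper keeps the commands short (a hypothetical convenience function, not part of the repo):
```bash
# Hypothetical helper: print the names of all supabase-* services in the compose file
supabase_services() {
  docker compose config --services | grep '^supabase-'
}

# Usage:
docker compose up -d $(supabase_services)
docker compose stop $(supabase_services)
```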
### Start All Services (Including Supabase)
```bash
# Start everything including Supabase
docker compose up -d
```
## Individual Service Management
### Start Specific Services
```bash
# Start just the database
docker compose up -d supabase-db
# Start database + API
docker compose up -d supabase-db supabase-rest
# Start all core services (db, rest, auth)
docker compose up -d supabase-db supabase-rest supabase-auth
```
### Stop Supabase Services
```bash
# Stop all Supabase services
docker compose stop $(docker compose config --services | grep '^supabase-')
# Or stop specific services
docker compose stop supabase-db supabase-rest
```
### Restart Supabase Services
```bash
# Restart all Supabase services
docker compose restart $(docker compose config --services | grep '^supabase-')
# Restart specific service
docker compose restart supabase-db
```
## Checking Service Status
### List All Supabase Services
```bash
# Using labels (recommended)
docker compose ps --filter "label=com.usda-vision.service=supabase"
# Using a shell substitution over service names
docker compose ps $(docker compose config --services | grep '^supabase-')
# Or just check all services
docker compose ps
```
### View Logs
```bash
# All Supabase services
docker compose logs -f $(docker compose config --services | grep '^supabase-')
# Specific service
docker compose logs -f supabase-db
# Last 50 lines of a service
docker compose logs --tail=50 supabase-migrate
```
### Check Service Health
```bash
# Check if database is healthy
docker compose ps supabase-db
# Check migration status
docker compose logs supabase-migrate | tail -20
# Check all Supabase services status
docker compose ps --filter "label=com.usda-vision.service=supabase" --format "table {{.Name}}\t{{.Status}}\t{{.Ports}}"
```
## Service Dependencies
Supabase services have dependencies. Start them in this order for best results:
1. **supabase-db** - Database (must start first)
2. **supabase-migrate** - Runs migrations (depends on db)
3. **supabase-rest** - API (depends on db + migrate)
4. **supabase-auth** - Authentication (depends on db)
5. **supabase-meta** - Metadata service (depends on db)
6. **supabase-realtime** - Realtime (depends on db + rest)
7. **supabase-storage** - Storage (depends on db + rest)
8. **supabase-studio** - Studio UI (depends on rest + auth)
9. **supabase-inbucket** - Email testing (independent)
**Note**: Docker Compose handles dependencies automatically via `depends_on`, so you can start them all at once and they'll start in the correct order.
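Because of `depends_on`, asking Compose for a single downstream service also starts everything it depends on. For example (a sketch):
```bash
# Starting only Studio also brings up supabase-db, supabase-migrate,
# supabase-rest, and supabase-auth via the declared depends_on chain
docker compose up -d supabase-studio
docker compose ps
```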
## Common Commands
### Start Everything
```bash
docker compose up -d
```
### Start Only Supabase
```bash
docker compose up -d supabase-db supabase-rest supabase-auth supabase-realtime supabase-storage supabase-studio supabase-meta supabase-migrate supabase-inbucket
```
### Stop Everything
```bash
docker compose down
```
### Stop Only Supabase (Keep Other Services Running)
```bash
docker compose stop $(docker compose config --services | grep '^supabase-')
```
### View Real-time Logs
```bash
# All Supabase services
docker compose logs -f $(docker compose config --services | grep '^supabase-')
# Specific service
docker compose logs -f supabase-db
```
### Reset Supabase (Remove Volumes)
```bash
# Stop and remove Supabase containers, then remove the named Supabase volumes
docker compose rm -sf $(docker compose config --services | grep '^supabase-')
docker volume ls -q | grep -E 'supabase-(db|storage)' | xargs -r docker volume rm
# Or use the reset script
./scripts/docker-compose-reset.sh
```
## Troubleshooting
### Rate Limit Errors
If you see `toomanyrequests: Rate exceeded`, Docker Hub is rate-limiting you. Solutions:
1. **Wait a few minutes** and try again
2. **Login to Docker Hub** (increases rate limit):
```bash
docker login
```
3. **Use cached images** if available:
```bash
docker images | grep supabase
```
### Services Not Starting
1. **Check logs**:
```bash
docker compose logs supabase-db
```
2. **Check if ports are in use**:
```bash
# Check if port 54322 (database) is in use
sudo lsof -i :54322
```
3. **Verify network exists**:
```bash
docker network ls | grep usda-vision
```
### Migration Issues
If migrations fail:
1. **Check migration logs**:
```bash
docker compose logs supabase-migrate
```
2. **Manually run migrations** (the migration files live on the host, not inside the `supabase-db` container, so pipe them in):
```bash
docker compose exec -T supabase-db psql -U postgres -d postgres < supabase/migrations/00001_extensions_and_utilities.sql
```
3. **Reset database** (⚠️ **WARNING**: This deletes all data):
```bash
docker compose down -v supabase-db
docker compose up -d supabase-db
```
## Service URLs
Once running, access services at:
- **Supabase API**: http://localhost:54321
- **Supabase Studio**: http://localhost:54323
- **Database (direct)**: `postgresql://postgres:postgres@localhost:54322/postgres`
- **Email Testing (Inbucket)**: http://localhost:54324
- **Realtime**: ws://localhost:4000
- **Storage API**: http://localhost:5000
## Best Practices
1. **Always start database first** (or let Compose handle dependencies)
2. **Wait for migrations** before starting dependent services
3. **Check logs** if services fail to start
4. **Use labels** to filter and manage Supabase services
5. **Keep volumes** for persistent data (don't use `-v` flag unless resetting)
## Example: Full Startup Sequence
```bash
# 1. Start database
docker compose up -d supabase-db
# 2. Wait for database to be ready (check logs)
docker compose logs -f supabase-db
# 3. Start all other Supabase services
docker compose up -d supabase-rest supabase-auth supabase-realtime supabase-storage supabase-studio supabase-meta supabase-migrate supabase-inbucket
# 4. Verify all services are running
docker compose ps --filter "label=com.usda-vision.service=supabase"
```
Or simply:
```bash
# Start everything at once (Compose handles dependencies)
docker compose up -d
```


@@ -56,7 +56,7 @@ The default anon key for local development is:
Migrations are automatically run on first startup via the `supabase-migrate` service. The service:
1. Waits for the database to be ready
2. Runs all migrations from `management-dashboard-web-app/supabase/migrations/` in alphabetical order
2. Runs all migrations from `supabase/migrations/` in alphabetical order
3. Runs seed files (`seed_01_users.sql` and `seed_02_phase2_experiments.sql`)
If you need to re-run migrations, you can:


@@ -0,0 +1,93 @@
# Supabase Directory Migration
## What Changed
The Supabase configuration has been moved from `management-dashboard-web-app/supabase/` to the project root `supabase/` directory.
## Why This Change?
1. **Better Architecture**: Supabase is shared infrastructure, not specific to the web app
2. **Monorepo Best Practice**: Infrastructure concerns should be at the root level
3. **Easier Access**: Other services (APIs, scripts) can now easily reference the database
4. **Clearer Ownership**: Makes it obvious that Supabase is a project-wide resource
## Migration Steps
### For Docker Compose Users
No action needed! The `docker-compose.yml` has been updated to use the new paths. Just restart your containers:
```bash
docker compose down
docker compose up -d
```
### For Supabase CLI Users
**Before** (old way):
```bash
cd management-dashboard-web-app
supabase start
supabase db reset
```
**After** (new way):
```bash
# From project root - no need to cd!
supabase start
supabase db reset
```
The Supabase CLI automatically looks for the `supabase/` directory in the current working directory, so you can now run all Supabase commands from the project root.
### Updating Your Workflow
If you have scripts or documentation that reference the old path, update them:
- `management-dashboard-web-app/supabase/migrations/` → `supabase/migrations/`
- `management-dashboard-web-app/supabase/config.toml` → `supabase/config.toml`
## Backward Compatibility
The old directory (`management-dashboard-web-app/supabase/`) can be kept for reference, but it's no longer used by docker-compose or the Supabase CLI. You can safely remove it after verifying everything works:
```bash
# After verifying everything works with the new location
rm -rf management-dashboard-web-app/supabase
```
## Verification
To verify the migration worked:
1. **Check docker-compose paths**:
```bash
grep -r "supabase" docker-compose.yml
# Should show: ./supabase/ (not ./management-dashboard-web-app/supabase/)
```
2. **Test Supabase CLI**:
```bash
# From project root
supabase status
# Should work without needing to cd into management-dashboard-web-app
```
3. **Test migrations**:
```bash
docker compose up -d
docker compose logs supabase-migrate
# Should show migrations running successfully
```
## Benefits
✅ Run Supabase commands from project root
✅ Clearer project structure
✅ Easier to share database across services
✅ Better alignment with monorepo best practices
✅ Infrastructure separated from application code

scripts/docker-compose-reset.sh Normal file → Executable file

@@ -2,8 +2,10 @@
# Docker Compose Reset Script
# This script performs a complete reset of the Docker Compose environment:
# - Stops and removes containers, networks, and volumes
# - Explicitly removes Supabase volumes (supabase-db, supabase-storage)
# - Prunes unused Docker resources (containers, images, networks, volumes)
# - Rebuilds and starts all services in detached mode
# - Waits for Supabase migrations to complete
set -e # Exit on error
@@ -24,7 +26,23 @@ echo "1. Stopping and removing containers, networks, and volumes..."
docker compose down -v
echo ""
echo "2. Pruning unused Docker resources..."
echo "2. Removing Supabase volumes explicitly..."
# Find and remove Supabase volumes (they may be named with project prefix)
# Docker Compose typically names volumes as: <project>_<volume-name>
# Try to find volumes containing 'supabase-db' or 'supabase-storage'
SUPABASE_VOLUMES=$(docker volume ls --format "{{.Name}}" | grep -E "(supabase-db|supabase-storage)" || true)
if [ -n "$SUPABASE_VOLUMES" ]; then
echo "$SUPABASE_VOLUMES" | while read -r volume; do
echo " - Removing Supabase volume: $volume"
docker volume rm "$volume" 2>/dev/null || echo " (Volume may have been removed already or is in use)"
done
else
echo " - No Supabase volumes found (they may have been removed already)"
fi
echo ""
echo "3. Pruning unused Docker resources..."
echo " - Pruning unused containers..."
docker container prune -f
@@ -38,18 +56,79 @@ echo " - Pruning unused volumes..."
docker volume prune -f
echo ""
echo "3. Rebuilding and starting all services in detached mode..."
echo "4. Rebuilding and starting all services in detached mode..."
docker compose up --build -d
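# Note: newer Compose releases support `docker compose up --build -d --wait`,
# which blocks until services with healthchecks report healthy; the manual
# polling below is kept so the script also works with older Compose versions.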
echo ""
echo "4. Checking service status..."
echo "5. Waiting for Supabase database to be ready..."
# Wait for database to be healthy
MAX_WAIT=60
WAIT_COUNT=0
while [ $WAIT_COUNT -lt $MAX_WAIT ]; do
if docker compose ps supabase-db | grep -q "healthy"; then
echo " ✓ Supabase database is healthy"
break
fi
echo " Waiting for database... ($WAIT_COUNT/$MAX_WAIT seconds)"
sleep 2
WAIT_COUNT=$((WAIT_COUNT + 2))
done
if [ $WAIT_COUNT -ge $MAX_WAIT ]; then
echo " ⚠ Warning: Database may not be fully ready"
fi
echo ""
echo "6. Waiting for Supabase migrations to complete..."
# Wait for migration container to complete (it has restart: "no", so it should exit when done)
MAX_WAIT=120
WAIT_COUNT=0
MIGRATE_CONTAINER="usda-vision-supabase-migrate"
while [ $WAIT_COUNT -lt $MAX_WAIT ]; do
# Check if container exists and its status
if docker ps -a --format "{{.Names}}\t{{.Status}}" | grep -q "^${MIGRATE_CONTAINER}"; then
CONTAINER_STATUS=$(docker ps -a --format "{{.Names}}\t{{.Status}}" | grep "^${MIGRATE_CONTAINER}" | awk '{print $2}')
if echo "$CONTAINER_STATUS" | grep -q "Exited"; then
EXIT_CODE=$(docker inspect "$MIGRATE_CONTAINER" --format='{{.State.ExitCode}}' 2>/dev/null || echo "1")
if [ "$EXIT_CODE" = "0" ]; then
echo " ✓ Supabase migrations completed successfully"
break
else
echo " ⚠ Warning: Migrations may have failed (exit code: $EXIT_CODE)"
echo " Check logs with: docker compose logs supabase-migrate"
break
fi
fi
fi
echo " Waiting for migrations... ($WAIT_COUNT/$MAX_WAIT seconds)"
sleep 2
WAIT_COUNT=$((WAIT_COUNT + 2))
done
if [ $WAIT_COUNT -ge $MAX_WAIT ]; then
echo " ⚠ Warning: Migration timeout - check logs with: docker compose logs supabase-migrate"
echo " Note: Migrations may still be running or the container may not have started yet"
fi
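# Note: a simpler alternative here is `docker wait usda-vision-supabase-migrate`,
# which blocks until the container exits and prints its exit code, but it hangs
# if the container never starts, so the bounded polling loop above is used instead.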
echo ""
echo "7. Checking service status..."
docker compose ps
echo ""
echo "=== Docker Compose Reset Complete ==="
echo ""
echo "All services have been reset and are running in detached mode."
echo "Use 'docker compose logs -f' to view logs or 'docker compose ps' to check status."
echo ""
echo "Useful commands:"
echo " - View all logs: docker compose logs -f"
echo " - View Supabase logs: docker compose logs -f supabase-db supabase-rest supabase-auth"
echo " - View migration logs: docker compose logs supabase-migrate"
echo " - Check service status: docker compose ps"
echo " - Access Supabase Studio: http://localhost:54323"
echo ""

scripts/pull-supabase-images.sh Executable file

@@ -0,0 +1,80 @@
#!/bin/bash
# Script to pull Supabase Docker images one at a time to avoid rate limits
set -e
echo "=== Pulling Supabase Images ==="
echo ""
echo "This script pulls images one at a time with delays to avoid Docker Hub rate limits."
echo ""
cd "$(dirname "$0")/.."
# List of Supabase services to pull
SERVICES=(
"supabase-db"
"supabase-rest"
"supabase-auth"
"supabase-realtime"
"supabase-storage"
"supabase-studio"
"supabase-meta"
"supabase-migrate"
"supabase-inbucket"
)
# Delay between pulls (in seconds)
DELAY=10
SUCCESS=0
FAILED=0
for service in "${SERVICES[@]}"; do
echo "Pulling image for: $service"
if docker compose pull "$service" 2>&1 | grep -q "Error\|rate\|toomanyrequests"; then
echo " ⚠ Rate limit hit or error. Waiting ${DELAY} seconds..."
sleep $DELAY
# Try once more
if ! docker compose pull "$service" 2>&1 | tail -1 | grep -q "Error\|rate\|toomanyrequests"; then
echo " ✓ Successfully pulled $service"
SUCCESS=$((SUCCESS + 1))  # avoid ((SUCCESS++)), which exits under set -e when the count is 0
else
echo " ✗ Failed to pull $service (rate limited)"
FAILED=$((FAILED + 1))
echo " → You may need to wait longer or login to Docker Hub: docker login"
fi
else
echo " ✓ Successfully pulled $service"
SUCCESS=$((SUCCESS + 1))
fi
# Small delay between pulls (skip after the last service)
if [ "$service" != "${SERVICES[-1]}" ]; then
echo "  Waiting ${DELAY} seconds before next pull..."
sleep $DELAY
fi
echo ""
done
echo "=== Summary ==="
echo "Successfully pulled: $SUCCESS"
echo "Failed: $FAILED"
echo ""
if [ $FAILED -eq 0 ]; then
echo "✓ All images pulled successfully!"
echo ""
echo "You can now start the services:"
echo " docker compose up -d supabase-*"
else
echo "⚠ Some images failed to pull due to rate limits."
echo ""
echo "Solutions:"
echo " 1. Wait a few minutes and run this script again"
echo " 2. Login to Docker Hub: docker login"
echo " 3. Pull images manually one at a time with delays"
fi


@@ -0,0 +1 @@
main


@@ -0,0 +1 @@
v2.67.1

supabase/README.md Normal file

@@ -0,0 +1,99 @@
# Supabase Database Module
This directory contains all Supabase configuration, migrations, and seed data for the USDA Vision project.
## Structure
```
supabase/
├── config.toml # Supabase CLI configuration
├── migrations/ # Database migration files (run in order)
│ ├── 00001_extensions_and_utilities.sql
│ ├── 00002_users_and_roles.sql
│ └── ...
├── seed_01_users.sql # Initial user data
└── seed_02_phase2_experiments.sql # Initial experiment data
```
## Usage
### With Docker Compose (Recommended)
The Supabase containers are managed by the main `docker-compose.yml` at the project root. Migrations and seeds are automatically run on startup.
```bash
# From project root
docker compose up -d
```
### With Supabase CLI
If you need to use Supabase CLI commands, run them from the project root (this directory's parent):
```bash
# From project root
cd /path/to/USDA-VISION
# Start Supabase (if not using docker-compose)
supabase start
# Run migrations manually
supabase db reset
# Generate types
supabase gen types typescript --local > management-dashboard-web-app/src/types/supabase.ts
```
**Note**: The Supabase CLI looks for the `supabase/` directory in the current working directory. Since we've moved it to the root, you can now run Supabase commands from the project root instead of needing to `cd` into `management-dashboard-web-app`.
## Migration Workflow
1. **Create a new migration**:
```bash
supabase migration new migration_name
```
2. **Apply migrations**:
- Automatically via docker-compose on startup
- Manually: `supabase db reset` (from project root)
3. **Check migration status**:
```bash
supabase migration list
```
## Seed Data
Seed files are run automatically after migrations when using docker-compose. They populate the database with initial data:
- `seed_01_users.sql`: Creates admin user and initial user profiles
- `seed_02_phase2_experiments.sql`: Creates initial experiment data
## Configuration
The `config.toml` file contains all Supabase service configurations:
- Database port: 54322
- API port: 54321
- Studio port: 54323
- Inbucket (email testing): 54324
See `config.toml` for detailed configuration options.
## Accessing Services
- **Supabase Studio**: http://localhost:54323
- **API**: http://localhost:54321
- **Database**: `postgresql://postgres:postgres@localhost:54322/postgres`
- **Email Testing (Inbucket)**: http://localhost:54324
## Best Practices
1. **Migrations are versioned**: Always use numbered prefixes (e.g., `00001_`, `00002_`)
2. **Migrations should be idempotent**: Use `IF NOT EXISTS` and `CREATE OR REPLACE` where possible
3. **Test migrations locally**: Always test migrations before committing
4. **Document breaking changes**: Add notes in migration files for schema changes
## Related Documentation
- [Supabase Docker Compose Integration](../docs/SUPABASE_DOCKER_COMPOSE.md)
- [Database Schema](../docs/database_schema.md)

supabase/config.toml Executable file

@@ -0,0 +1,333 @@
# For detailed configuration reference documentation, visit:
# https://supabase.com/docs/guides/local-development/cli/config
# A string used to distinguish different Supabase projects on the same host. Defaults to the
# working directory name when running `supabase init`.
project_id = "pecan_experiments"
[api]
enabled = true
# Port to use for the API URL.
port = 54321
# Schemas to expose in your API. Tables, views and stored procedures in this schema will get API
# endpoints. `public` and `graphql_public` schemas are included by default.
schemas = ["public", "graphql_public"]
# Extra schemas to add to the search_path of every request.
extra_search_path = ["public", "extensions"]
# The maximum number of rows returns from a view, table, or stored procedure. Limits payload size
# for accidental or malicious requests.
max_rows = 1000
[api.tls]
# Enable HTTPS endpoints locally using a self-signed certificate.
enabled = false
[db]
# Port to use for the local database URL.
port = 54322
# Port used by db diff command to initialize the shadow database.
shadow_port = 54320
# The database major version to use. This has to be the same as your remote database's. Run `SHOW
# server_version;` on the remote database to check.
major_version = 17
[db.pooler]
enabled = false
# Port to use for the local connection pooler.
port = 54329
# Specifies when a server connection can be reused by other clients.
# Configure one of the supported pooler modes: `transaction`, `session`.
pool_mode = "transaction"
# How many server connections to allow per user/database pair.
default_pool_size = 20
# Maximum number of client connections allowed.
max_client_conn = 100
# [db.vault]
# secret_key = "env(SECRET_VALUE)"
[db.migrations]
# If disabled, migrations will be skipped during a db push or reset.
enabled = true
# Specifies an ordered list of schema files that describe your database.
# Supports glob patterns relative to supabase directory: "./schemas/*.sql"
schema_paths = []
[db.seed]
# If enabled, seeds the database after migrations during a db reset.
enabled = true
# Specifies an ordered list of seed files to load during db reset.
# Supports glob patterns relative to supabase directory: "./seeds/*.sql"
sql_paths = ["./seed_01_users.sql", "./seed_02_phase2_experiments.sql"]
# , "./seed_04_phase2_jc_experiments.sql", "./seed_05_meyer_experiments.sql"]
[db.network_restrictions]
# Enable management of network restrictions.
enabled = false
# List of IPv4 CIDR blocks allowed to connect to the database.
# Defaults to allow all IPv4 connections. Set empty array to block all IPs.
allowed_cidrs = ["0.0.0.0/0"]
# List of IPv6 CIDR blocks allowed to connect to the database.
# Defaults to allow all IPv6 connections. Set empty array to block all IPs.
allowed_cidrs_v6 = ["::/0"]
[realtime]
enabled = true
# Bind realtime via either IPv4 or IPv6. (default: IPv4)
# ip_version = "IPv6"
# The maximum length in bytes of HTTP request headers. (default: 4096)
# max_header_length = 4096
[studio]
enabled = true
# Port to use for Supabase Studio.
port = 54323
# External URL of the API server that frontend connects to.
api_url = "http://exp-dash"
# OpenAI API Key to use for Supabase AI in the Supabase Studio.
openai_api_key = "env(OPENAI_API_KEY)"
# Email testing server. Emails sent with the local dev setup are not actually sent - rather, they
# are monitored, and you can view the emails that would have been sent from the web interface.
[inbucket]
enabled = true
# Port to use for the email testing server web interface.
port = 54324
# Uncomment to expose additional ports for testing user applications that send emails.
# smtp_port = 54325
# pop3_port = 54326
# admin_email = "admin@email.com"
# sender_name = "Admin"
[storage]
enabled = true
# The maximum file size allowed (e.g. "5MB", "500KB").
file_size_limit = "50MiB"
# Image transformation API is available to Supabase Pro plan.
# [storage.image_transformation]
# enabled = true
# Uncomment to configure local storage buckets
# [storage.buckets.images]
# public = false
# file_size_limit = "50MiB"
# allowed_mime_types = ["image/png", "image/jpeg"]
# objects_path = "./images"
[auth]
enabled = true
# The base URL of your website. Used as an allow-list for redirects and for constructing URLs used
# in emails.
site_url = "http://exp-dash:3000"
# A list of *exact* URLs that auth providers are permitted to redirect to post authentication.
additional_redirect_urls = ["https://exp-dash:3000"]
# How long tokens are valid for, in seconds. Defaults to 3600 (1 hour), maximum 604,800 (1 week).
jwt_expiry = 3600
# If disabled, the refresh token will never expire.
enable_refresh_token_rotation = true
# Allows refresh tokens to be reused after expiry, up to the specified interval in seconds.
# Requires enable_refresh_token_rotation = true.
refresh_token_reuse_interval = 10
# Allow/disallow new user signups to your project.
enable_signup = true
# Allow/disallow anonymous sign-ins to your project.
enable_anonymous_sign_ins = false
# Allow/disallow testing manual linking of accounts
enable_manual_linking = false
# Passwords shorter than this value will be rejected as weak. Minimum 6, recommended 8 or more.
minimum_password_length = 6
# Passwords that do not meet the following requirements will be rejected as weak. Supported values
# are: `letters_digits`, `lower_upper_letters_digits`, `lower_upper_letters_digits_symbols`
password_requirements = ""
[auth.rate_limit]
# Number of emails that can be sent per hour. Requires auth.email.smtp to be enabled.
email_sent = 2
# Number of SMS messages that can be sent per hour. Requires auth.sms to be enabled.
sms_sent = 30
# Number of anonymous sign-ins that can be made per hour per IP address. Requires enable_anonymous_sign_ins = true.
anonymous_users = 30
# Number of sessions that can be refreshed in a 5 minute interval per IP address.
token_refresh = 150
# Number of sign up and sign-in requests that can be made in a 5 minute interval per IP address (excludes anonymous users).
sign_in_sign_ups = 30
# Number of OTP / Magic link verifications that can be made in a 5 minute interval per IP address.
token_verifications = 30
# Number of Web3 logins that can be made in a 5 minute interval per IP address.
web3 = 30
# Configure one of the supported captcha providers: `hcaptcha`, `turnstile`.
# [auth.captcha]
# enabled = true
# provider = "hcaptcha"
# secret = ""
[auth.email]
# Allow/disallow new user signups via email to your project.
enable_signup = true
# If enabled, a user will be required to confirm any email change on both the old, and new email
# addresses. If disabled, only the new email is required to confirm.
double_confirm_changes = true
# If enabled, users need to confirm their email address before signing in.
enable_confirmations = false
# If enabled, users will need to reauthenticate or have logged in recently to change their password.
secure_password_change = false
# Controls the minimum amount of time that must pass before sending another signup confirmation or password reset email.
max_frequency = "1s"
# Number of characters used in the email OTP.
otp_length = 6
# Number of seconds before the email OTP expires (defaults to 1 hour).
otp_expiry = 3600
# Use a production-ready SMTP server
# [auth.email.smtp]
# enabled = true
# host = "smtp.sendgrid.net"
# port = 587
# user = "apikey"
# pass = "env(SENDGRID_API_KEY)"
# admin_email = "admin@email.com"
# sender_name = "Admin"
# Uncomment to customize email template
# [auth.email.template.invite]
# subject = "You have been invited"
# content_path = "./supabase/templates/invite.html"
[auth.sms]
# Allow/disallow new user signups via SMS to your project.
enable_signup = false
# If enabled, users need to confirm their phone number before signing in.
enable_confirmations = false
# Template for sending OTP to users
template = "Your code is {{ .Code }}"
# Controls the minimum amount of time that must pass before sending another sms otp.
max_frequency = "5s"
# Use pre-defined map of phone number to OTP for testing.
# [auth.sms.test_otp]
# 4152127777 = "123456"
# Configure logged in session timeouts.
# [auth.sessions]
# Force log out after the specified duration.
# timebox = "24h"
# Force log out if the user has been inactive longer than the specified duration.
# inactivity_timeout = "8h"
# This hook runs before a new user is created and allows developers to reject the request based on the incoming user object.
# [auth.hook.before_user_created]
# enabled = true
# uri = "pg-functions://postgres/auth/before-user-created-hook"
# This hook runs before a token is issued and allows you to add additional claims based on the authentication method used.
# [auth.hook.custom_access_token]
# enabled = true
# uri = "pg-functions://<database>/<schema>/<hook_name>"
# Configure one of the supported SMS providers: `twilio`, `twilio_verify`, `messagebird`, `textlocal`, `vonage`.
[auth.sms.twilio]
enabled = false
account_sid = ""
message_service_sid = ""
# DO NOT commit your Twilio auth token to git. Use environment variable substitution instead:
auth_token = "env(SUPABASE_AUTH_SMS_TWILIO_AUTH_TOKEN)"
# Multi-factor-authentication is available to Supabase Pro plan.
[auth.mfa]
# Control how many MFA factors can be enrolled at once per user.
max_enrolled_factors = 10
# Control MFA via App Authenticator (TOTP)
[auth.mfa.totp]
enroll_enabled = false
verify_enabled = false
# Configure MFA via Phone Messaging
[auth.mfa.phone]
enroll_enabled = false
verify_enabled = false
otp_length = 6
template = "Your code is {{ .Code }}"
max_frequency = "5s"
# Configure MFA via WebAuthn
# [auth.mfa.web_authn]
# enroll_enabled = true
# verify_enabled = true
# Use an external OAuth provider. The full list of providers are: `apple`, `azure`, `bitbucket`,
# `discord`, `facebook`, `github`, `gitlab`, `google`, `keycloak`, `linkedin_oidc`, `notion`, `twitch`,
# `twitter`, `slack`, `spotify`, `workos`, `zoom`.
[auth.external.apple]
enabled = false
client_id = ""
# DO NOT commit your OAuth provider secret to git. Use environment variable substitution instead:
secret = "env(SUPABASE_AUTH_EXTERNAL_APPLE_SECRET)"
# Overrides the default auth redirectUrl.
redirect_uri = ""
# Overrides the default auth provider URL. Used to support self-hosted gitlab, single-tenant Azure,
# or any other third-party OIDC providers.
url = ""
# If enabled, the nonce check will be skipped. Required for local sign in with Google auth.
skip_nonce_check = false
# Allow Solana wallet holders to sign in to your project via the Sign in with Solana (SIWS, EIP-4361) standard.
# You can configure "web3" rate limit in the [auth.rate_limit] section and set up [auth.captcha] if self-hosting.
[auth.web3.solana]
enabled = false
# Use Firebase Auth as a third-party provider alongside Supabase Auth.
[auth.third_party.firebase]
enabled = false
# project_id = "my-firebase-project"
# Use Auth0 as a third-party provider alongside Supabase Auth.
[auth.third_party.auth0]
enabled = false
# tenant = "my-auth0-tenant"
# tenant_region = "us"
# Use AWS Cognito (Amplify) as a third-party provider alongside Supabase Auth.
[auth.third_party.aws_cognito]
enabled = false
# user_pool_id = "my-user-pool-id"
# user_pool_region = "us-east-1"
# Use Clerk as a third-party provider alongside Supabase Auth.
[auth.third_party.clerk]
enabled = false
# Obtain from https://clerk.com/setup/supabase
# domain = "example.clerk.accounts.dev"
[edge_runtime]
enabled = true
# Configure one of the supported request policies: `oneshot`, `per_worker`.
# Use `oneshot` for hot reload, or `per_worker` for load testing.
policy = "oneshot"
# Port to attach the Chrome inspector for debugging edge functions.
inspector_port = 8083
# The Deno major version to use.
deno_version = 1
# [edge_runtime.secrets]
# secret_key = "env(SECRET_VALUE)"
[analytics]
enabled = true
port = 54327
# Configure one of the supported backends: `postgres`, `bigquery`.
backend = "postgres"
# Experimental features may be deprecated any time
[experimental]
# Configures Postgres storage engine to use OrioleDB (S3)
orioledb_version = ""
# Configures S3 bucket URL, eg. <bucket_name>.s3-<region>.amazonaws.com
s3_host = "env(S3_HOST)"
# Configures S3 bucket region, eg. us-east-1
s3_region = "env(S3_REGION)"
# Configures AWS_ACCESS_KEY_ID for S3 bucket
s3_access_key = "env(S3_ACCESS_KEY)"
# Configures AWS_SECRET_ACCESS_KEY for S3 bucket
s3_secret_key = "env(S3_SECRET_KEY)"


@@ -0,0 +1,87 @@
-- Extensions and Utility Functions
-- This migration creates required extensions and utility functions used across the database
-- =============================================
-- 1. EXTENSIONS
-- =============================================
-- Enable UUID generation
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
-- Enable password hashing
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- =============================================
-- 2. UTILITY FUNCTIONS
-- =============================================
-- Function to handle updated_at timestamp
CREATE OR REPLACE FUNCTION public.handle_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Helper function to get current user's roles
CREATE OR REPLACE FUNCTION public.get_user_roles()
RETURNS TEXT[] AS $$
BEGIN
RETURN ARRAY(
SELECT r.name
FROM public.user_roles ur
JOIN public.roles r ON ur.role_id = r.id
WHERE ur.user_id = auth.uid()
);
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Helper function to get current user's first role (for backward compatibility)
CREATE OR REPLACE FUNCTION public.get_user_role()
RETURNS TEXT AS $$
BEGIN
-- Return the first role found (for backward compatibility)
RETURN (
SELECT r.name
FROM public.user_roles ur
JOIN public.roles r ON ur.role_id = r.id
WHERE ur.user_id = auth.uid()
LIMIT 1
);
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Helper function to check if user is admin
CREATE OR REPLACE FUNCTION public.is_admin()
RETURNS BOOLEAN AS $$
BEGIN
RETURN 'admin' = ANY(public.get_user_roles());
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Helper function to check if user has specific role
CREATE OR REPLACE FUNCTION public.has_role(role_name TEXT)
RETURNS BOOLEAN AS $$
BEGIN
RETURN role_name = ANY(public.get_user_roles());
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Helper function to check if user can manage experiments
CREATE OR REPLACE FUNCTION public.can_manage_experiments()
RETURNS BOOLEAN AS $$
BEGIN
RETURN EXISTS (
SELECT 1
FROM public.user_roles ur
JOIN public.roles r ON ur.role_id = r.id
WHERE ur.user_id = auth.uid()
AND r.name IN ('admin', 'conductor')
);
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
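-- Example usage (illustrative sketch, not run by this migration): these
-- helpers are meant to be called from RLS policies or via PostgREST RPC.
-- The table and policy names below are hypothetical.
--
-- CREATE POLICY "Only admins may delete" ON public.some_table
--     FOR DELETE USING (public.is_admin());
--
-- SELECT public.get_user_roles();          -- e.g. {admin,conductor}
-- SELECT public.has_role('conductor');     -- true or false
-- SELECT public.can_manage_experiments();  -- true for admin or conductor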


@@ -0,0 +1,237 @@
-- Users and Roles
-- This migration creates user-related tables with clean separation
-- =============================================
-- 1. ROLES TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.roles (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name TEXT NOT NULL UNIQUE,
description TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- =============================================
-- 2. USER PROFILES TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.user_profiles (
id UUID PRIMARY KEY REFERENCES auth.users(id) ON DELETE CASCADE,
email TEXT NOT NULL UNIQUE,
first_name TEXT,
last_name TEXT,
status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'inactive', 'suspended')),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- =============================================
-- 3. USER ROLES JUNCTION TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.user_roles (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID NOT NULL REFERENCES public.user_profiles(id) ON DELETE CASCADE,
role_id UUID NOT NULL REFERENCES public.roles(id) ON DELETE CASCADE,
assigned_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
assigned_by UUID REFERENCES public.user_profiles(id),
UNIQUE(user_id, role_id)
);
-- =============================================
-- 4. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_user_profiles_email ON public.user_profiles(email);
CREATE INDEX IF NOT EXISTS idx_user_roles_user_id ON public.user_roles(user_id);
CREATE INDEX IF NOT EXISTS idx_user_roles_role_id ON public.user_roles(role_id);
-- =============================================
-- 5. TRIGGERS
-- =============================================
-- Create trigger for updated_at on user_profiles
CREATE TRIGGER set_updated_at_user_profiles
BEFORE UPDATE ON public.user_profiles
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- Create trigger for updated_at on roles
CREATE TRIGGER set_updated_at_roles
BEFORE UPDATE ON public.roles
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 6. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.roles TO authenticated;
GRANT ALL ON public.user_profiles TO authenticated;
GRANT ALL ON public.user_roles TO authenticated;
-- =============================================
-- 7. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.roles ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.user_profiles ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.user_roles ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 8. CREATE RLS POLICIES
-- =============================================
-- Create RLS policies for roles (read-only for all authenticated users)
CREATE POLICY "Roles are viewable by authenticated users" ON public.roles
FOR SELECT USING (auth.role() = 'authenticated');
-- Create RLS policies for user_profiles
CREATE POLICY "User profiles are viewable by authenticated users" ON public.user_profiles
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "User profiles are insertable by authenticated users" ON public.user_profiles
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "User profiles are updatable by authenticated users" ON public.user_profiles
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "User profiles are deletable by authenticated users" ON public.user_profiles
FOR DELETE USING (auth.role() = 'authenticated');
-- Create RLS policies for user_roles
CREATE POLICY "User roles are viewable by authenticated users" ON public.user_roles
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "User roles are insertable by authenticated users" ON public.user_roles
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "User roles are updatable by authenticated users" ON public.user_roles
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "User roles are deletable by authenticated users" ON public.user_roles
FOR DELETE USING (auth.role() = 'authenticated');
-- =============================================
-- 9. USER MANAGEMENT FUNCTIONS
-- =============================================
-- Function to create a new user with roles
CREATE OR REPLACE FUNCTION public.create_user_with_roles(
user_email TEXT,
role_names TEXT[],
temp_password TEXT
)
RETURNS JSON AS $$
DECLARE
new_user_id UUID;
encrypted_pwd TEXT;
role_name TEXT;
role_id_val UUID;
assigned_by_id UUID;
result JSON;
user_roles_array TEXT[];
BEGIN
-- Generate new user ID
new_user_id := uuid_generate_v4();
-- Encrypt the password
encrypted_pwd := crypt(temp_password, gen_salt('bf'));
-- Get the current user ID for assigned_by, but only if they have a profile
-- Otherwise, use the new user ID (which we'll create next)
SELECT id INTO assigned_by_id
FROM public.user_profiles
WHERE id = auth.uid();
-- If no valid assigned_by user found, use the new user ID (self-assigned)
IF assigned_by_id IS NULL THEN
assigned_by_id := new_user_id;
END IF;
-- Create user in auth.users
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
new_user_id,
'authenticated',
'authenticated',
user_email,
encrypted_pwd,
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
-- Create user profile
INSERT INTO public.user_profiles (id, email, status)
VALUES (new_user_id, user_email, 'active');
-- Assign roles
user_roles_array := ARRAY[]::TEXT[];
FOREACH role_name IN ARRAY role_names
LOOP
-- Get role ID
SELECT id INTO role_id_val
FROM public.roles
WHERE name = role_name;
-- If role exists, assign it
IF role_id_val IS NOT NULL THEN
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
VALUES (new_user_id, role_id_val, assigned_by_id)
ON CONFLICT (user_id, role_id) DO NOTHING;
-- Add to roles array for return value
user_roles_array := array_append(user_roles_array, role_name);
END IF;
END LOOP;
-- Return the result as JSON
result := json_build_object(
'user_id', new_user_id::TEXT,
'email', user_email,
'temp_password', temp_password,
'roles', user_roles_array,
'status', 'active'
);
RETURN result;
EXCEPTION
WHEN unique_violation THEN
RAISE EXCEPTION 'User with email % already exists', user_email;
WHEN OTHERS THEN
RAISE EXCEPTION 'Error creating user: %', SQLERRM;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Grant execute permission on the function
GRANT EXECUTE ON FUNCTION public.create_user_with_roles(TEXT, TEXT[], TEXT) TO authenticated;
-- Comment for documentation
COMMENT ON FUNCTION public.create_user_with_roles(TEXT, TEXT[], TEXT) IS
'Creates a new user in auth.users, creates a profile in user_profiles, and assigns the specified roles. Returns user information including user_id, email, temp_password, roles, and status.';
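-- Example call (illustrative, not run by this migration). Invoked by an
-- authenticated user, it creates the auth user, the profile, and the role
-- assignments in one statement; the email and password are sample values:
--
-- SELECT public.create_user_with_roles(
--     'new.user@example.com',
--     ARRAY['conductor'],
--     'temp-password-123'
-- );
--
-- Returns JSON of the form:
-- {"user_id": "<uuid>", "email": "new.user@example.com",
--  "temp_password": "temp-password-123", "roles": ["conductor"], "status": "active"}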


@@ -0,0 +1,63 @@
-- Machine Types
-- This migration creates the machine types table
-- =============================================
-- 1. MACHINE TYPES TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.machine_types (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name TEXT NOT NULL UNIQUE,
description TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_machine_types_name ON public.machine_types(name);
-- =============================================
-- 3. TRIGGERS
-- =============================================
-- Create trigger for updated_at on machine_types
CREATE TRIGGER set_updated_at_machine_types
BEFORE UPDATE ON public.machine_types
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 4. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.machine_types TO authenticated;
-- =============================================
-- 5. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.machine_types ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 6. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "Machine types are viewable by authenticated users" ON public.machine_types
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Machine types are insertable by authenticated users" ON public.machine_types
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Machine types are updatable by authenticated users" ON public.machine_types
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Machine types are deletable by authenticated users" ON public.machine_types
FOR DELETE USING (auth.role() = 'authenticated');


@@ -0,0 +1,77 @@
-- Experiment Phases
-- This migration creates the experiment phases table
-- =============================================
-- 1. EXPERIMENT PHASES TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.experiment_phases (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name TEXT NOT NULL UNIQUE,
description TEXT,
has_soaking BOOLEAN NOT NULL DEFAULT false,
has_airdrying BOOLEAN NOT NULL DEFAULT false,
has_cracking BOOLEAN NOT NULL DEFAULT false,
has_shelling BOOLEAN NOT NULL DEFAULT false,
cracking_machine_type_id UUID REFERENCES public.machine_types(id) ON DELETE SET NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure at least one phase is selected
CONSTRAINT check_at_least_one_phase
CHECK (has_soaking = true OR has_airdrying = true OR has_cracking = true OR has_shelling = true),
-- If has_cracking is true, then cracking_machine_type_id must not be null
CONSTRAINT ck_experiment_phases_machine_required_when_cracking
CHECK ((has_cracking = false) OR (cracking_machine_type_id IS NOT NULL))
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_experiment_phases_name ON public.experiment_phases(name);
CREATE INDEX IF NOT EXISTS idx_experiment_phases_cracking_machine_type_id ON public.experiment_phases(cracking_machine_type_id);
-- =============================================
-- 3. TRIGGERS
-- =============================================
-- Create trigger for updated_at on experiment_phases
CREATE TRIGGER set_updated_at_experiment_phases
BEFORE UPDATE ON public.experiment_phases
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 4. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.experiment_phases TO authenticated;
-- =============================================
-- 5. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.experiment_phases ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 6. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "Experiment phases are viewable by authenticated users" ON public.experiment_phases
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Experiment phases are insertable by authenticated users" ON public.experiment_phases
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Experiment phases are updatable by authenticated users" ON public.experiment_phases
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Experiment phases are deletable by authenticated users" ON public.experiment_phases
FOR DELETE USING (auth.role() = 'authenticated');
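-- Example insert (illustrative, not run by this migration). The two CHECK
-- constraints above require at least one phase flag to be true and a machine
-- type whenever has_cracking is true; the subqueries below are placeholders:
--
-- INSERT INTO public.experiment_phases
--     (name, has_cracking, cracking_machine_type_id, created_by)
-- VALUES ('Cracking Only',
--         true,
--         (SELECT id FROM public.machine_types WHERE name = 'JC Cracker'),
--         (SELECT id FROM public.user_profiles LIMIT 1));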


@@ -0,0 +1,70 @@
-- Experiments
-- This migration creates the experiments table
-- =============================================
-- 1. EXPERIMENTS TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.experiments (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_number INTEGER NOT NULL,
reps_required INTEGER NOT NULL CHECK (reps_required > 0),
weight_per_repetition_lbs DOUBLE PRECISION NOT NULL DEFAULT 5.0 CHECK (weight_per_repetition_lbs > 0),
results_status TEXT NOT NULL DEFAULT 'valid' CHECK (results_status IN ('valid', 'invalid')),
completion_status BOOLEAN NOT NULL DEFAULT false,
    -- phase_id is NOT NULL, so ON DELETE SET NULL would fail at delete time;
    -- restrict deletion of a phase that experiments still reference
    phase_id UUID NOT NULL REFERENCES public.experiment_phases(id) ON DELETE RESTRICT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure unique combination of experiment_number and phase_id
CONSTRAINT unique_experiment_number_phase UNIQUE (experiment_number, phase_id)
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_experiments_phase_id ON public.experiments(phase_id);
CREATE INDEX IF NOT EXISTS idx_experiments_experiment_number ON public.experiments(experiment_number);
CREATE INDEX IF NOT EXISTS idx_experiments_created_by ON public.experiments(created_by);
-- Note: experiments(id) is the primary key and already indexed, so no separate index is needed
-- =============================================
-- 3. TRIGGERS
-- =============================================
-- Create trigger for updated_at on experiments
CREATE TRIGGER set_updated_at_experiments
BEFORE UPDATE ON public.experiments
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 4. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.experiments TO authenticated;
-- =============================================
-- 5. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.experiments ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 6. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "Experiments are viewable by authenticated users" ON public.experiments
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Experiments are insertable by authenticated users" ON public.experiments
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Experiments are updatable by authenticated users" ON public.experiments
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Experiments are deletable by authenticated users" ON public.experiments
FOR DELETE USING (auth.role() = 'authenticated');


@@ -0,0 +1,69 @@
-- Experiment Repetitions
-- This migration creates the experiment repetitions table
-- =============================================
-- 1. EXPERIMENT REPETITIONS TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.experiment_repetitions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_id UUID NOT NULL REFERENCES public.experiments(id) ON DELETE CASCADE,
repetition_number INTEGER NOT NULL CHECK (repetition_number > 0),
scheduled_date TIMESTAMP WITH TIME ZONE,
status TEXT NOT NULL DEFAULT 'pending' CHECK (status IN ('pending', 'in_progress', 'completed', 'cancelled')),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure unique repetition numbers per experiment
UNIQUE(experiment_id, repetition_number)
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_experiment_repetitions_experiment_id ON public.experiment_repetitions(experiment_id);
CREATE INDEX IF NOT EXISTS idx_experiment_repetitions_created_by ON public.experiment_repetitions(created_by);
-- =============================================
-- 3. TRIGGERS
-- =============================================
-- Create trigger for updated_at on experiment_repetitions
CREATE TRIGGER set_updated_at_experiment_repetitions
BEFORE UPDATE ON public.experiment_repetitions
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 4. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.experiment_repetitions TO authenticated;
-- =============================================
-- 5. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.experiment_repetitions ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 6. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "Experiment repetitions are viewable by authenticated users" ON public.experiment_repetitions
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Experiment repetitions are insertable by authenticated users" ON public.experiment_repetitions
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Experiment repetitions are updatable by authenticated users" ON public.experiment_repetitions
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Experiment repetitions are deletable by authenticated users" ON public.experiment_repetitions
FOR DELETE USING (auth.role() = 'authenticated');


@@ -0,0 +1,89 @@
-- Cracker Parameters
-- This migration creates machine-specific parameter tables (must be created before cracking table)
-- =============================================
-- 1. JC CRACKER PARAMETERS TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.jc_cracker_parameters (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
plate_contact_frequency_hz DOUBLE PRECISION NOT NULL CHECK (plate_contact_frequency_hz > 0),
throughput_rate_pecans_sec DOUBLE PRECISION NOT NULL CHECK (throughput_rate_pecans_sec > 0),
crush_amount_in DOUBLE PRECISION NOT NULL CHECK (crush_amount_in >= 0),
entry_exit_height_diff_in DOUBLE PRECISION NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- =============================================
-- 2. MEYER CRACKER PARAMETERS TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.meyer_cracker_parameters (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
motor_speed_hz DOUBLE PRECISION NOT NULL CHECK (motor_speed_hz > 0),
jig_displacement_inches DOUBLE PRECISION NOT NULL,
spring_stiffness_nm DOUBLE PRECISION NOT NULL CHECK (spring_stiffness_nm > 0),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
-- =============================================
-- 3. TRIGGERS
-- =============================================
CREATE TRIGGER set_updated_at_jc_cracker_parameters
BEFORE UPDATE ON public.jc_cracker_parameters
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
CREATE TRIGGER set_updated_at_meyer_cracker_parameters
BEFORE UPDATE ON public.meyer_cracker_parameters
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- =============================================
-- 4. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.jc_cracker_parameters TO authenticated;
GRANT ALL ON public.meyer_cracker_parameters TO authenticated;
-- =============================================
-- 5. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.jc_cracker_parameters ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.meyer_cracker_parameters ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 6. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "JC Cracker parameters are viewable by authenticated users" ON public.jc_cracker_parameters
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "JC Cracker parameters are insertable by authenticated users" ON public.jc_cracker_parameters
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "JC Cracker parameters are updatable by authenticated users" ON public.jc_cracker_parameters
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "JC Cracker parameters are deletable by authenticated users" ON public.jc_cracker_parameters
FOR DELETE USING (auth.role() = 'authenticated');
CREATE POLICY "Meyer Cracker parameters are viewable by authenticated users" ON public.meyer_cracker_parameters
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Meyer Cracker parameters are insertable by authenticated users" ON public.meyer_cracker_parameters
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Meyer Cracker parameters are updatable by authenticated users" ON public.meyer_cracker_parameters
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Meyer Cracker parameters are deletable by authenticated users" ON public.meyer_cracker_parameters
FOR DELETE USING (auth.role() = 'authenticated');


@@ -0,0 +1,274 @@
-- Phase Data Tables
-- This migration creates phase-specific data entry tables (soaking, airdrying, cracking, shelling)
-- =============================================
-- 1. SOAKING TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.soaking (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
repetition_id UUID NOT NULL REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
soaking_duration_minutes INTEGER NOT NULL CHECK (soaking_duration_minutes > 0),
scheduled_end_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure only one soaking per repetition
CONSTRAINT unique_soaking_per_repetition UNIQUE (repetition_id)
);
-- =============================================
-- 2. AIRDRYING TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.airdrying (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
repetition_id UUID NOT NULL REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
duration_minutes INTEGER NOT NULL CHECK (duration_minutes > 0),
scheduled_end_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure only one airdrying per repetition
CONSTRAINT unique_airdrying_per_repetition UNIQUE (repetition_id)
);
-- =============================================
-- 3. CRACKING TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.cracking (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
repetition_id UUID NOT NULL REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
machine_type_id UUID NOT NULL REFERENCES public.machine_types(id),
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure only one cracking per repetition
CONSTRAINT unique_cracking_per_repetition UNIQUE (repetition_id)
);
-- =============================================
-- 4. SHELLING TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.shelling (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
repetition_id UUID NOT NULL REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure only one shelling per repetition
CONSTRAINT unique_shelling_per_repetition UNIQUE (repetition_id)
);
-- =============================================
-- 5. INDEXES FOR PERFORMANCE
-- =============================================
-- Create indexes for repetition references
CREATE INDEX IF NOT EXISTS idx_soaking_repetition_id ON public.soaking(repetition_id);
CREATE INDEX IF NOT EXISTS idx_airdrying_repetition_id ON public.airdrying(repetition_id);
CREATE INDEX IF NOT EXISTS idx_cracking_repetition_id ON public.cracking(repetition_id);
CREATE INDEX IF NOT EXISTS idx_shelling_repetition_id ON public.shelling(repetition_id);
-- Create indexes for machine type references
CREATE INDEX IF NOT EXISTS idx_cracking_machine_type_id ON public.cracking(machine_type_id);
-- Create indexes for created_by references
CREATE INDEX IF NOT EXISTS idx_soaking_created_by ON public.soaking(created_by);
CREATE INDEX IF NOT EXISTS idx_airdrying_created_by ON public.airdrying(created_by);
CREATE INDEX IF NOT EXISTS idx_cracking_created_by ON public.cracking(created_by);
CREATE INDEX IF NOT EXISTS idx_shelling_created_by ON public.shelling(created_by);
-- =============================================
-- 6. TRIGGER FUNCTIONS FOR AUTOMATIC TIMESTAMP CALCULATIONS
-- =============================================
-- Function to calculate scheduled end time for soaking
CREATE OR REPLACE FUNCTION calculate_soaking_scheduled_end_time()
RETURNS TRIGGER AS $$
BEGIN
NEW.scheduled_end_time = NEW.scheduled_start_time + (NEW.soaking_duration_minutes || ' minutes')::INTERVAL;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Function to calculate scheduled end time for airdrying
CREATE OR REPLACE FUNCTION calculate_airdrying_scheduled_end_time()
RETURNS TRIGGER AS $$
BEGIN
NEW.scheduled_end_time = NEW.scheduled_start_time + (NEW.duration_minutes || ' minutes')::INTERVAL;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Function to set airdrying scheduled start time based on soaking end time
CREATE OR REPLACE FUNCTION set_airdrying_scheduled_start_time()
RETURNS TRIGGER AS $$
BEGIN
-- If this is a new airdrying record and no scheduled_start_time is provided,
-- try to get it from the associated soaking's scheduled_end_time for the same repetition
IF NEW.scheduled_start_time IS NULL THEN
SELECT s.scheduled_end_time INTO NEW.scheduled_start_time
FROM public.soaking s
WHERE s.repetition_id = NEW.repetition_id
LIMIT 1;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Function to set cracking scheduled start time based on airdrying end time
CREATE OR REPLACE FUNCTION set_cracking_scheduled_start_time()
RETURNS TRIGGER AS $$
BEGIN
-- If this is a new cracking record and no scheduled_start_time is provided,
-- try to get it from the associated airdrying's scheduled_end_time for the same repetition
IF NEW.scheduled_start_time IS NULL THEN
SELECT a.scheduled_end_time INTO NEW.scheduled_start_time
FROM public.airdrying a
WHERE a.repetition_id = NEW.repetition_id
LIMIT 1;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- =============================================
-- 7. TRIGGERS
-- =============================================
-- Triggers for automatic timestamp calculations
DROP TRIGGER IF EXISTS trigger_calculate_soaking_scheduled_end_time ON public.soaking;
CREATE TRIGGER trigger_calculate_soaking_scheduled_end_time
BEFORE INSERT OR UPDATE ON public.soaking
FOR EACH ROW
EXECUTE FUNCTION calculate_soaking_scheduled_end_time();
DROP TRIGGER IF EXISTS trigger_calculate_airdrying_scheduled_end_time ON public.airdrying;
CREATE TRIGGER trigger_calculate_airdrying_scheduled_end_time
BEFORE INSERT OR UPDATE ON public.airdrying
FOR EACH ROW
EXECUTE FUNCTION calculate_airdrying_scheduled_end_time();
DROP TRIGGER IF EXISTS trigger_set_airdrying_scheduled_start_time ON public.airdrying;
CREATE TRIGGER trigger_set_airdrying_scheduled_start_time
BEFORE INSERT ON public.airdrying
FOR EACH ROW
EXECUTE FUNCTION set_airdrying_scheduled_start_time();
DROP TRIGGER IF EXISTS trigger_set_cracking_scheduled_start_time ON public.cracking;
CREATE TRIGGER trigger_set_cracking_scheduled_start_time
BEFORE INSERT ON public.cracking
FOR EACH ROW
EXECUTE FUNCTION set_cracking_scheduled_start_time();
-- Triggers for updated_at on all phase tables
CREATE TRIGGER set_updated_at_soaking
BEFORE UPDATE ON public.soaking
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
CREATE TRIGGER set_updated_at_airdrying
BEFORE UPDATE ON public.airdrying
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
CREATE TRIGGER set_updated_at_cracking
BEFORE UPDATE ON public.cracking
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
CREATE TRIGGER set_updated_at_shelling
BEFORE UPDATE ON public.shelling
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
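-- Example (illustrative, not run by this migration): with the triggers above,
-- a soaking insert may omit scheduled_end_time (the BEFORE INSERT trigger
-- computes it from the duration before the NOT NULL check runs), and a later
-- airdrying insert without a start time inherits the soaking end time for the
-- same repetition. The repetition UUID is a placeholder:
--
-- INSERT INTO public.soaking
--     (repetition_id, scheduled_start_time, soaking_duration_minutes, created_by)
-- VALUES ('<repetition-uuid>', '2025-01-01T08:00:00Z', 90,
--         (SELECT id FROM public.user_profiles LIMIT 1));
-- scheduled_end_time is set by the trigger to 2025-01-01T09:30:00Z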
-- =============================================
-- 8. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.soaking TO authenticated;
GRANT ALL ON public.airdrying TO authenticated;
GRANT ALL ON public.cracking TO authenticated;
GRANT ALL ON public.shelling TO authenticated;
-- =============================================
-- 9. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.soaking ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.airdrying ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.cracking ENABLE ROW LEVEL SECURITY;
ALTER TABLE public.shelling ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 10. CREATE RLS POLICIES
-- =============================================
-- Create RLS policies for phase tables
CREATE POLICY "Soaking data is viewable by authenticated users" ON public.soaking
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Soaking data is insertable by authenticated users" ON public.soaking
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Soaking data is updatable by authenticated users" ON public.soaking
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Soaking data is deletable by authenticated users" ON public.soaking
FOR DELETE USING (auth.role() = 'authenticated');
CREATE POLICY "Airdrying data is viewable by authenticated users" ON public.airdrying
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Airdrying data is insertable by authenticated users" ON public.airdrying
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Airdrying data is updatable by authenticated users" ON public.airdrying
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Airdrying data is deletable by authenticated users" ON public.airdrying
FOR DELETE USING (auth.role() = 'authenticated');
CREATE POLICY "Cracking data is viewable by authenticated users" ON public.cracking
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Cracking data is insertable by authenticated users" ON public.cracking
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Cracking data is updatable by authenticated users" ON public.cracking
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Cracking data is deletable by authenticated users" ON public.cracking
FOR DELETE USING (auth.role() = 'authenticated');
CREATE POLICY "Shelling data is viewable by authenticated users" ON public.shelling
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Shelling data is insertable by authenticated users" ON public.shelling
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Shelling data is updatable by authenticated users" ON public.shelling
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Shelling data is deletable by authenticated users" ON public.shelling
FOR DELETE USING (auth.role() = 'authenticated');


@@ -0,0 +1,193 @@
-- Conductor Availability
-- This migration creates the conductor availability table
-- =============================================
-- 1. CONDUCTOR AVAILABILITY TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.conductor_availability (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
user_id UUID NOT NULL REFERENCES public.user_profiles(id) ON DELETE CASCADE,
available_from TIMESTAMP WITH TIME ZONE NOT NULL,
available_to TIMESTAMP WITH TIME ZONE NOT NULL,
notes TEXT, -- Optional notes about the availability
status TEXT NOT NULL DEFAULT 'active' CHECK (status IN ('active', 'cancelled')),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure available_to is after available_from
CONSTRAINT valid_time_range CHECK (available_to > available_from),
    -- Ensure availability is not in the past (allows up to one day of backdating).
    -- Note: NOW() in a CHECK constraint is volatile; it is re-evaluated on every
    -- insert/update and on dump/restore, so long-lived past records may need this
    -- constraint relaxed
    CONSTRAINT future_availability CHECK (available_from >= NOW() - INTERVAL '1 day')
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_conductor_availability_user_id ON public.conductor_availability(user_id);
CREATE INDEX IF NOT EXISTS idx_conductor_availability_available_from ON public.conductor_availability(available_from);
CREATE INDEX IF NOT EXISTS idx_conductor_availability_available_to ON public.conductor_availability(available_to);
CREATE INDEX IF NOT EXISTS idx_conductor_availability_status ON public.conductor_availability(status);
CREATE INDEX IF NOT EXISTS idx_conductor_availability_created_by ON public.conductor_availability(created_by);
CREATE INDEX IF NOT EXISTS idx_conductor_availability_time_range ON public.conductor_availability(available_from, available_to);
-- =============================================
-- 3. FUNCTIONS FOR OVERLAP PREVENTION
-- =============================================
-- Function to check for overlapping availabilities
CREATE OR REPLACE FUNCTION public.check_availability_overlap()
RETURNS TRIGGER AS $$
DECLARE
overlap_count INTEGER;
BEGIN
-- Check for overlapping availabilities for the same user
SELECT COUNT(*) INTO overlap_count
FROM public.conductor_availability
WHERE user_id = NEW.user_id
AND id != COALESCE(NEW.id, '00000000-0000-0000-0000-000000000000'::UUID)
AND status = 'active'
AND (
-- New availability starts during an existing one
(NEW.available_from >= available_from AND NEW.available_from < available_to) OR
-- New availability ends during an existing one
(NEW.available_to > available_from AND NEW.available_to <= available_to) OR
-- New availability completely contains an existing one
(NEW.available_from <= available_from AND NEW.available_to >= available_to) OR
-- Existing availability completely contains the new one
(available_from <= NEW.available_from AND available_to >= NEW.available_to)
);
IF overlap_count > 0 THEN
RAISE EXCEPTION 'Availability overlaps with existing availability for user %. Please adjust the time range or cancel the conflicting availability.', NEW.user_id;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Function to get available conductors for a specific time range
CREATE OR REPLACE FUNCTION public.get_available_conductors(
start_time TIMESTAMP WITH TIME ZONE,
end_time TIMESTAMP WITH TIME ZONE
)
RETURNS TABLE (
user_id UUID,
email TEXT,
available_from TIMESTAMP WITH TIME ZONE,
available_to TIMESTAMP WITH TIME ZONE
) AS $$
BEGIN
RETURN QUERY
SELECT
ca.user_id,
up.email,
ca.available_from,
ca.available_to
FROM public.conductor_availability ca
JOIN public.user_profiles up ON ca.user_id = up.id
JOIN public.user_roles ur ON up.id = ur.user_id
JOIN public.roles r ON ur.role_id = r.id
WHERE ca.status = 'active'
AND r.name = 'conductor'
AND ca.available_from <= start_time
AND ca.available_to >= end_time
ORDER BY up.email;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Function to check if a conductor is available for a specific time range
CREATE OR REPLACE FUNCTION public.is_conductor_available(
conductor_user_id UUID,
start_time TIMESTAMP WITH TIME ZONE,
end_time TIMESTAMP WITH TIME ZONE
)
RETURNS BOOLEAN AS $$
DECLARE
availability_count INTEGER;
BEGIN
SELECT COUNT(*) INTO availability_count
FROM public.conductor_availability
WHERE user_id = conductor_user_id
AND status = 'active'
AND available_from <= start_time
AND available_to >= end_time;
RETURN availability_count > 0;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
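-- Example usage (illustrative, not run by this migration): both helpers treat
-- a conductor as available only when one active window fully covers the
-- requested range. The timestamps and UUID are sample values:
--
-- SELECT * FROM public.get_available_conductors(
--     '2025-01-10T09:00:00Z', '2025-01-10T12:00:00Z');
--
-- SELECT public.is_conductor_available(
--     '<conductor-uuid>', '2025-01-10T09:00:00Z', '2025-01-10T12:00:00Z');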
-- =============================================
-- 4. TRIGGERS
-- =============================================
-- Create trigger for updated_at on conductor_availability
CREATE TRIGGER set_updated_at_conductor_availability
BEFORE UPDATE ON public.conductor_availability
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
-- Create trigger to prevent overlapping availabilities
CREATE TRIGGER trigger_check_availability_overlap
BEFORE INSERT OR UPDATE ON public.conductor_availability
FOR EACH ROW
EXECUTE FUNCTION public.check_availability_overlap();
-- =============================================
-- 5. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.conductor_availability TO authenticated;
-- =============================================
-- 6. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.conductor_availability ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 7. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "conductor_availability_select_policy" ON public.conductor_availability
FOR SELECT
TO authenticated
USING (
-- Users can view their own availability, admins can view all
user_id = auth.uid() OR public.is_admin()
);
CREATE POLICY "conductor_availability_insert_policy" ON public.conductor_availability
FOR INSERT
TO authenticated
WITH CHECK (
-- Users can create their own availability, admins can create for anyone
(user_id = auth.uid() AND created_by = auth.uid()) OR public.is_admin()
);
CREATE POLICY "conductor_availability_update_policy" ON public.conductor_availability
FOR UPDATE
TO authenticated
USING (
-- Users can update their own availability, admins can update any
user_id = auth.uid() OR public.is_admin()
)
WITH CHECK (
-- Users can update their own availability, admins can update any
user_id = auth.uid() OR public.is_admin()
);
CREATE POLICY "conductor_availability_delete_policy" ON public.conductor_availability
FOR DELETE
TO authenticated
USING (
-- Users can delete their own availability, admins can delete any
user_id = auth.uid() OR public.is_admin()
);


@@ -0,0 +1,195 @@
-- Views
-- This migration creates views for easier querying (must run last after all tables are created)
-- =============================================
-- 1. CREATE VIEWS FOR EASIER QUERYING
-- =============================================
-- View for experiments with all phase information
-- Note: Since phases are now per-repetition, this view shows phase data from the first repetition
CREATE OR REPLACE VIEW public.experiments_with_phases AS
SELECT
e.id,
e.experiment_number,
e.reps_required,
e.weight_per_repetition_lbs,
e.results_status,
e.completion_status,
e.phase_id,
e.created_at,
e.updated_at,
e.created_by,
ep.name as phase_name,
ep.description as phase_description,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
er.id as first_repetition_id,
er.repetition_number as first_repetition_number,
s.id as soaking_id,
s.scheduled_start_time as soaking_scheduled_start,
s.actual_start_time as soaking_actual_start,
s.soaking_duration_minutes,
s.scheduled_end_time as soaking_scheduled_end,
s.actual_end_time as soaking_actual_end,
ad.id as airdrying_id,
ad.scheduled_start_time as airdrying_scheduled_start,
ad.actual_start_time as airdrying_actual_start,
ad.duration_minutes as airdrying_duration,
ad.scheduled_end_time as airdrying_scheduled_end,
ad.actual_end_time as airdrying_actual_end,
c.id as cracking_id,
c.scheduled_start_time as cracking_scheduled_start,
c.actual_start_time as cracking_actual_start,
c.actual_end_time as cracking_actual_end,
mt.name as machine_type_name,
sh.id as shelling_id,
sh.scheduled_start_time as shelling_scheduled_start,
sh.actual_start_time as shelling_actual_start,
sh.actual_end_time as shelling_actual_end
FROM public.experiments e
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN LATERAL (
SELECT id, repetition_number
FROM public.experiment_repetitions
WHERE experiment_id = e.id
ORDER BY repetition_number
LIMIT 1
) er ON true
LEFT JOIN public.soaking s ON s.repetition_id = er.id
LEFT JOIN public.airdrying ad ON ad.repetition_id = er.id
LEFT JOIN public.cracking c ON c.repetition_id = er.id
LEFT JOIN public.machine_types mt ON c.machine_type_id = mt.id
LEFT JOIN public.shelling sh ON sh.repetition_id = er.id;
-- View for repetitions with phase information
CREATE OR REPLACE VIEW public.repetitions_with_phases AS
SELECT
er.id,
er.experiment_id,
er.repetition_number,
er.status,
er.created_at,
er.updated_at,
er.created_by,
e.experiment_number,
e.phase_id,
e.weight_per_repetition_lbs,
ep.name as phase_name,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
s.scheduled_start_time as soaking_scheduled_start,
s.actual_start_time as soaking_actual_start,
s.soaking_duration_minutes,
s.scheduled_end_time as soaking_scheduled_end,
s.actual_end_time as soaking_actual_end,
ad.scheduled_start_time as airdrying_scheduled_start,
ad.actual_start_time as airdrying_actual_start,
ad.duration_minutes as airdrying_duration,
ad.scheduled_end_time as airdrying_scheduled_end,
ad.actual_end_time as airdrying_actual_end,
c.scheduled_start_time as cracking_scheduled_start,
c.actual_start_time as cracking_actual_start,
c.actual_end_time as cracking_actual_end,
mt.name as machine_type_name,
sh.scheduled_start_time as shelling_scheduled_start,
sh.actual_start_time as shelling_actual_start,
sh.actual_end_time as shelling_actual_end
FROM public.experiment_repetitions er
JOIN public.experiments e ON er.experiment_id = e.id
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.soaking s ON er.id = s.repetition_id
LEFT JOIN public.airdrying ad ON er.id = ad.repetition_id
LEFT JOIN public.cracking c ON er.id = c.repetition_id
LEFT JOIN public.machine_types mt ON c.machine_type_id = mt.id
LEFT JOIN public.shelling sh ON er.id = sh.repetition_id;
-- View for available conductors with their roles
CREATE OR REPLACE VIEW public.available_conductors AS
SELECT
ca.*,
up.email,
up.first_name,
up.last_name,
r.name as role_name
FROM public.conductor_availability ca
JOIN public.user_profiles up ON ca.user_id = up.id
JOIN public.user_roles ur ON up.id = ur.user_id
JOIN public.roles r ON ur.role_id = r.id
WHERE ca.status = 'active'
AND r.name = 'conductor';
-- =============================================
-- 2. GRANT PERMISSIONS FOR VIEWS
-- =============================================
GRANT SELECT ON public.experiments_with_phases TO authenticated;
GRANT SELECT ON public.repetitions_with_phases TO authenticated;
GRANT SELECT ON public.available_conductors TO authenticated;
-- =============================================
-- 3. COMMENTS FOR DOCUMENTATION
-- =============================================
COMMENT ON VIEW public.experiments_with_phases IS 'Comprehensive view of experiments with all phase information and timing details';
COMMENT ON VIEW public.repetitions_with_phases IS 'View of experiment repetitions with associated phase data';
COMMENT ON VIEW public.available_conductors IS 'View of currently available conductors with their profile information';
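-- Example query (illustrative, not run by this migration): fetch a timing
-- summary for one experiment, including first-repetition phase data:
--
-- SELECT experiment_number, phase_name,
--        soaking_scheduled_start, soaking_scheduled_end,
--        cracking_actual_start, machine_type_name
-- FROM public.experiments_with_phases
-- WHERE experiment_number = 1;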
-- =============================================
-- 4. SAMPLE DATA FUNCTIONS (OPTIONAL)
-- =============================================
-- Function to create sample roles
CREATE OR REPLACE FUNCTION public.create_sample_roles()
RETURNS VOID AS $$
BEGIN
INSERT INTO public.roles (name, description) VALUES
('admin', 'System administrator with full access'),
('conductor', 'Experiment conductor with limited access'),
('researcher', 'Research staff with read-only access')
ON CONFLICT (name) DO NOTHING;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Function to create sample machine types
CREATE OR REPLACE FUNCTION public.create_sample_machine_types()
RETURNS VOID AS $$
BEGIN
INSERT INTO public.machine_types (name, description, created_by) VALUES
('JC Cracker', 'Johnson Cracker machine for pecan shelling', (SELECT id FROM public.user_profiles LIMIT 1)),
('Meyer Cracker', 'Meyer Cracker machine for pecan shelling', (SELECT id FROM public.user_profiles LIMIT 1))
ON CONFLICT (name) DO NOTHING;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Function to create sample experiment phases
CREATE OR REPLACE FUNCTION public.create_sample_experiment_phases()
RETURNS VOID AS $$
DECLARE
jc_cracker_id UUID;
meyer_cracker_id UUID;
BEGIN
-- Get machine type IDs
SELECT id INTO jc_cracker_id FROM public.machine_types WHERE name = 'JC Cracker';
SELECT id INTO meyer_cracker_id FROM public.machine_types WHERE name = 'Meyer Cracker';
INSERT INTO public.experiment_phases (name, description, has_soaking, has_airdrying, has_cracking, has_shelling, cracking_machine_type_id, created_by) VALUES
('Full Process - JC Cracker', 'Complete pecan processing with JC Cracker', true, true, true, true, jc_cracker_id, (SELECT id FROM public.user_profiles LIMIT 1)),
('Full Process - Meyer Cracker', 'Complete pecan processing with Meyer Cracker', true, true, true, true, meyer_cracker_id, (SELECT id FROM public.user_profiles LIMIT 1)),
('Cracking Only - JC Cracker', 'JC Cracker cracking process only', false, false, true, false, jc_cracker_id, (SELECT id FROM public.user_profiles LIMIT 1)),
('Cracking Only - Meyer Cracker', 'Meyer Cracker cracking process only', false, false, true, false, meyer_cracker_id, (SELECT id FROM public.user_profiles LIMIT 1))
ON CONFLICT (name) DO NOTHING;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- =============================================
-- 5. GRANT PERMISSIONS FOR SAMPLE DATA FUNCTIONS
-- =============================================
GRANT EXECUTE ON FUNCTION public.create_sample_roles() TO authenticated;
GRANT EXECUTE ON FUNCTION public.create_sample_machine_types() TO authenticated;
GRANT EXECUTE ON FUNCTION public.create_sample_experiment_phases() TO authenticated;
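-- Example seeding sequence (illustrative, not run by this migration). Order
-- matters: machine types reference a user profile and phases reference
-- machine types, so at least one user profile must exist before these run:
--
-- SELECT public.create_sample_roles();
-- SELECT public.create_sample_machine_types();
-- SELECT public.create_sample_experiment_phases();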


@@ -0,0 +1,300 @@
-- Unified Phase Executions Table
-- This migration replaces the separate phase tables (soaking, airdrying, cracking, shelling)
-- with a unified table that properly supports repetitions
-- =============================================
-- 1. CREATE UNIFIED PHASE EXECUTIONS TABLE
-- =============================================
CREATE TABLE IF NOT EXISTS public.experiment_phase_executions (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
repetition_id UUID NOT NULL REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
phase_type TEXT NOT NULL CHECK (phase_type IN ('soaking', 'airdrying', 'cracking', 'shelling')),
-- Scheduling fields (common to all phases)
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
scheduled_end_time TIMESTAMP WITH TIME ZONE,
actual_start_time TIMESTAMP WITH TIME ZONE,
actual_end_time TIMESTAMP WITH TIME ZONE,
-- Phase-specific parameters (nullable, only relevant for specific phases)
-- Soaking
soaking_duration_minutes INTEGER CHECK (soaking_duration_minutes > 0),
-- Airdrying
duration_minutes INTEGER CHECK (duration_minutes > 0),
-- Cracking
machine_type_id UUID REFERENCES public.machine_types(id),
-- Status tracking
status TEXT NOT NULL DEFAULT 'pending'
CHECK (status IN ('pending', 'scheduled', 'in_progress', 'completed', 'cancelled')),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id),
-- Ensure one execution per phase type per repetition
CONSTRAINT unique_phase_per_repetition UNIQUE (repetition_id, phase_type)
);
-- =============================================
-- 2. INDEXES FOR PERFORMANCE
-- =============================================
CREATE INDEX IF NOT EXISTS idx_phase_executions_repetition_id
ON public.experiment_phase_executions(repetition_id);
CREATE INDEX IF NOT EXISTS idx_phase_executions_phase_type
ON public.experiment_phase_executions(phase_type);
CREATE INDEX IF NOT EXISTS idx_phase_executions_status
ON public.experiment_phase_executions(status);
CREATE INDEX IF NOT EXISTS idx_phase_executions_scheduled_start_time
ON public.experiment_phase_executions(scheduled_start_time);
CREATE INDEX IF NOT EXISTS idx_phase_executions_machine_type_id
ON public.experiment_phase_executions(machine_type_id);
CREATE INDEX IF NOT EXISTS idx_phase_executions_created_by
ON public.experiment_phase_executions(created_by);
-- =============================================
-- 3. FUNCTION: Calculate Sequential Phase Start Times
-- =============================================
CREATE OR REPLACE FUNCTION calculate_sequential_phase_start_time()
RETURNS TRIGGER AS $$
DECLARE
prev_phase_end_time TIMESTAMP WITH TIME ZONE;
phase_order TEXT[] := ARRAY['soaking', 'airdrying', 'cracking', 'shelling'];
current_phase_index INT;
prev_phase_name TEXT;
BEGIN
-- Find current phase index in the sequence
SELECT array_position(phase_order, NEW.phase_type) INTO current_phase_index;
-- If not the first phase, get previous phase's end time from the same repetition
IF current_phase_index > 1 THEN
prev_phase_name := phase_order[current_phase_index - 1];
SELECT scheduled_end_time INTO prev_phase_end_time
FROM public.experiment_phase_executions
WHERE repetition_id = NEW.repetition_id
AND phase_type = prev_phase_name
ORDER BY created_at DESC
LIMIT 1;
-- If previous phase exists and has an end time, use it as start time
IF prev_phase_end_time IS NOT NULL THEN
NEW.scheduled_start_time := prev_phase_end_time;
END IF;
END IF;
-- Calculate end time based on duration (for phases with duration)
IF NEW.phase_type = 'soaking' AND NEW.soaking_duration_minutes IS NOT NULL THEN
NEW.scheduled_end_time := NEW.scheduled_start_time +
(NEW.soaking_duration_minutes || ' minutes')::INTERVAL;
ELSIF NEW.phase_type = 'airdrying' AND NEW.duration_minutes IS NOT NULL THEN
NEW.scheduled_end_time := NEW.scheduled_start_time +
(NEW.duration_minutes || ' minutes')::INTERVAL;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- =============================================
-- 4. FUNCTION: Auto-create Phase Executions for New Repetition
-- =============================================
CREATE OR REPLACE FUNCTION create_phase_executions_for_repetition()
RETURNS TRIGGER AS $$
DECLARE
exp_phase_config RECORD;
phase_type_list TEXT[] := ARRAY[]::TEXT[];
phase_name TEXT;
BEGIN
-- Get experiment phase configuration
-- Note: Phase durations may need to be set later when scheduling
SELECT
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
ep.cracking_machine_type_id
INTO exp_phase_config
FROM public.experiments e
JOIN public.experiment_phases ep ON e.phase_id = ep.id
WHERE e.id = NEW.experiment_id;
-- Build list of phases to create based on experiment configuration
IF exp_phase_config.has_soaking THEN
phase_type_list := array_append(phase_type_list, 'soaking');
END IF;
IF exp_phase_config.has_airdrying THEN
phase_type_list := array_append(phase_type_list, 'airdrying');
END IF;
IF exp_phase_config.has_cracking THEN
phase_type_list := array_append(phase_type_list, 'cracking');
END IF;
IF exp_phase_config.has_shelling THEN
phase_type_list := array_append(phase_type_list, 'shelling');
END IF;
-- Create phase executions for each required phase
FOREACH phase_name IN ARRAY phase_type_list
LOOP
INSERT INTO public.experiment_phase_executions (
repetition_id,
phase_type,
scheduled_start_time,
status,
created_by,
-- Phase-specific parameters
soaking_duration_minutes,
duration_minutes,
machine_type_id
)
VALUES (
NEW.id,
phase_name,
NOW(), -- Default start time, will be updated when scheduled or by sequential calculation
'pending',
NEW.created_by,
-- Set phase-specific parameters
-- Note: Durations will be set when the repetition is scheduled
-- These can be NULL initially and updated later
NULL, -- soaking_duration_minutes (set when scheduled)
NULL, -- duration_minutes (set when scheduled)
CASE WHEN phase_name = 'cracking'
THEN exp_phase_config.cracking_machine_type_id
ELSE NULL END
);
END LOOP;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- =============================================
-- 5. TRIGGERS
-- =============================================
-- Trigger to automatically calculate sequential times and durations
CREATE TRIGGER trigger_calculate_sequential_phase_times
BEFORE INSERT OR UPDATE ON public.experiment_phase_executions
FOR EACH ROW
EXECUTE FUNCTION calculate_sequential_phase_start_time();
-- Trigger to auto-create phases when repetition is created
CREATE TRIGGER trigger_create_phase_executions
AFTER INSERT ON public.experiment_repetitions
FOR EACH ROW
EXECUTE FUNCTION create_phase_executions_for_repetition();
-- Trigger for updated_at
CREATE TRIGGER set_updated_at_phase_executions
BEFORE UPDATE ON public.experiment_phase_executions
FOR EACH ROW
EXECUTE FUNCTION public.handle_updated_at();
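-- Example (illustrative, not run by this migration): inserting a repetition
-- auto-creates one pending execution row per enabled phase, and updating a
-- phase's duration recomputes its end time. The UUIDs are placeholders:
--
-- INSERT INTO public.experiment_repetitions
--     (experiment_id, repetition_number, created_by)
-- VALUES ('<experiment-uuid>', 1, '<user-uuid>');
--
-- UPDATE public.experiment_phase_executions
-- SET scheduled_start_time = '2025-01-01T08:00:00Z',
--     soaking_duration_minutes = 90
-- WHERE repetition_id = '<repetition-uuid>' AND phase_type = 'soaking';
-- The BEFORE UPDATE trigger sets scheduled_end_time to 09:30:00Z; a later
-- update to the airdrying row then inherits that value as its start time.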
-- =============================================
-- 6. CREATE VIEWS FOR PHASE-SPECIFIC ACCESS (Backward Compatibility)
-- =============================================
-- These views allow existing code to work with phase-specific "tables"
CREATE OR REPLACE VIEW public.soaking_view AS
SELECT
id,
(SELECT experiment_id FROM experiment_repetitions WHERE id = repetition_id) as experiment_id,
repetition_id,
scheduled_start_time,
actual_start_time,
soaking_duration_minutes,
scheduled_end_time,
actual_end_time,
created_at,
updated_at,
created_by
FROM public.experiment_phase_executions
WHERE phase_type = 'soaking';
CREATE OR REPLACE VIEW public.airdrying_view AS
SELECT
id,
(SELECT experiment_id FROM experiment_repetitions WHERE id = repetition_id) as experiment_id,
repetition_id,
scheduled_start_time,
actual_start_time,
duration_minutes,
scheduled_end_time,
actual_end_time,
created_at,
updated_at,
created_by
FROM public.experiment_phase_executions
WHERE phase_type = 'airdrying';
CREATE OR REPLACE VIEW public.cracking_view AS
SELECT
id,
(SELECT experiment_id FROM experiment_repetitions WHERE id = repetition_id) as experiment_id,
repetition_id,
machine_type_id,
scheduled_start_time,
actual_start_time,
actual_end_time,
created_at,
updated_at,
created_by
FROM public.experiment_phase_executions
WHERE phase_type = 'cracking';
CREATE OR REPLACE VIEW public.shelling_view AS
SELECT
id,
(SELECT experiment_id FROM experiment_repetitions WHERE id = repetition_id) as experiment_id,
repetition_id,
scheduled_start_time,
actual_start_time,
actual_end_time,
created_at,
updated_at,
created_by
FROM public.experiment_phase_executions
WHERE phase_type = 'shelling';
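-- Example (comment-only): each view reads like the old phase-specific table, e.g.
--
--   SELECT repetition_id, scheduled_start_time, soaking_duration_minutes
--     FROM public.soaking_view
--    WHERE actual_end_time IS NULL;  -- soaking runs not yet finished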
-- =============================================
-- 7. GRANT PERMISSIONS
-- =============================================
GRANT ALL ON public.experiment_phase_executions TO authenticated;
GRANT SELECT ON public.soaking_view TO authenticated;
GRANT SELECT ON public.airdrying_view TO authenticated;
GRANT SELECT ON public.cracking_view TO authenticated;
GRANT SELECT ON public.shelling_view TO authenticated;
-- =============================================
-- 8. ENABLE ROW LEVEL SECURITY
-- =============================================
ALTER TABLE public.experiment_phase_executions ENABLE ROW LEVEL SECURITY;
-- =============================================
-- 9. CREATE RLS POLICIES
-- =============================================
CREATE POLICY "Phase executions are viewable by authenticated users"
ON public.experiment_phase_executions
FOR SELECT USING (auth.role() = 'authenticated');
CREATE POLICY "Phase executions are insertable by authenticated users"
ON public.experiment_phase_executions
FOR INSERT WITH CHECK (auth.role() = 'authenticated');
CREATE POLICY "Phase executions are updatable by authenticated users"
ON public.experiment_phase_executions
FOR UPDATE USING (auth.role() = 'authenticated');
CREATE POLICY "Phase executions are deletable by authenticated users"
ON public.experiment_phase_executions
FOR DELETE USING (auth.role() = 'authenticated');
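-- Sanity check (comment-only, assumes a psql session with permission to switch roles):
-- anon has neither a table grant nor a passing policy here, while authenticated
-- sessions satisfy all four policies above.
--
--   SET ROLE anon;
--   SELECT count(*) FROM public.experiment_phase_executions;  -- expect: permission denied
--   RESET ROLE;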


@@ -0,0 +1,46 @@
-- Add repetition_id foreign key to cracker parameters tables
-- This migration adds a foreign key to link cracker parameters to their repetitions
-- =============================================
-- 1. ADD REPETITION_ID TO JC CRACKER PARAMETERS
-- =============================================
ALTER TABLE public.jc_cracker_parameters
ADD COLUMN IF NOT EXISTS repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE;
-- Add index for performance
CREATE INDEX IF NOT EXISTS idx_jc_cracker_parameters_repetition_id
ON public.jc_cracker_parameters(repetition_id);
-- Add unique constraint to ensure one parameter set per repetition
ALTER TABLE public.jc_cracker_parameters
ADD CONSTRAINT unique_jc_cracker_parameters_per_repetition
UNIQUE (repetition_id);
-- =============================================
-- 2. ADD REPETITION_ID TO MEYER CRACKER PARAMETERS
-- =============================================
ALTER TABLE public.meyer_cracker_parameters
ADD COLUMN IF NOT EXISTS repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE;
-- Add index for performance
CREATE INDEX IF NOT EXISTS idx_meyer_cracker_parameters_repetition_id
ON public.meyer_cracker_parameters(repetition_id);
-- Add unique constraint to ensure one parameter set per repetition
ALTER TABLE public.meyer_cracker_parameters
ADD CONSTRAINT unique_meyer_cracker_parameters_per_repetition
UNIQUE (repetition_id);
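-- Usage sketch (comment-only): the UNIQUE (repetition_id) constraints turn per-repetition
-- writes into idempotent upserts; plate_contact_frequency is a hypothetical column name.
--
--   INSERT INTO public.jc_cracker_parameters (repetition_id, plate_contact_frequency)
--   VALUES ('<repetition-uuid>', 42)
--   ON CONFLICT (repetition_id)
--   DO UPDATE SET plate_contact_frequency = EXCLUDED.plate_contact_frequency;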


@@ -0,0 +1,277 @@
-- View: Experiments with All Repetitions and Phase Parameters
-- This view provides a comprehensive view of experiments with all their repetitions
-- and all phase execution parameters (soaking, airdrying, cracking, shelling)
CREATE OR REPLACE VIEW public.experiments_with_all_reps_and_phases AS
SELECT
-- Experiment fields
e.id as experiment_id,
e.experiment_number,
e.reps_required,
e.weight_per_repetition_lbs,
e.results_status,
e.completion_status,
e.phase_id,
e.created_at as experiment_created_at,
e.updated_at as experiment_updated_at,
e.created_by as experiment_created_by,
-- Phase information
ep.name as phase_name,
ep.description as phase_description,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
ep.cracking_machine_type_id as phase_cracking_machine_type_id,
-- Repetition fields
er.id as repetition_id,
er.repetition_number,
er.status as repetition_status,
er.scheduled_date,
er.created_at as repetition_created_at,
er.updated_at as repetition_updated_at,
er.created_by as repetition_created_by,
-- Soaking phase execution
soaking_e.id as soaking_execution_id,
soaking_e.scheduled_start_time as soaking_scheduled_start,
soaking_e.actual_start_time as soaking_actual_start,
soaking_e.soaking_duration_minutes,
soaking_e.scheduled_end_time as soaking_scheduled_end,
soaking_e.actual_end_time as soaking_actual_end,
soaking_e.status as soaking_status,
-- Airdrying phase execution
airdrying_e.id as airdrying_execution_id,
airdrying_e.scheduled_start_time as airdrying_scheduled_start,
airdrying_e.actual_start_time as airdrying_actual_start,
airdrying_e.duration_minutes as airdrying_duration_minutes,
airdrying_e.scheduled_end_time as airdrying_scheduled_end,
airdrying_e.actual_end_time as airdrying_actual_end,
airdrying_e.status as airdrying_status,
-- Cracking phase execution
cracking_e.id as cracking_execution_id,
cracking_e.scheduled_start_time as cracking_scheduled_start,
cracking_e.actual_start_time as cracking_actual_start,
cracking_e.scheduled_end_time as cracking_scheduled_end,
cracking_e.actual_end_time as cracking_actual_end,
cracking_e.machine_type_id as cracking_machine_type_id,
cracking_e.status as cracking_status,
mt.name as machine_type_name,
-- Shelling phase execution
shelling_e.id as shelling_execution_id,
shelling_e.scheduled_start_time as shelling_scheduled_start,
shelling_e.actual_start_time as shelling_actual_start,
shelling_e.scheduled_end_time as shelling_scheduled_end,
shelling_e.actual_end_time as shelling_actual_end,
shelling_e.status as shelling_status
FROM public.experiments e
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.experiment_repetitions er ON er.experiment_id = e.id
LEFT JOIN public.experiment_phase_executions soaking_e
ON soaking_e.repetition_id = er.id AND soaking_e.phase_type = 'soaking'
LEFT JOIN public.experiment_phase_executions airdrying_e
ON airdrying_e.repetition_id = er.id AND airdrying_e.phase_type = 'airdrying'
LEFT JOIN public.experiment_phase_executions cracking_e
ON cracking_e.repetition_id = er.id AND cracking_e.phase_type = 'cracking'
LEFT JOIN public.experiment_phase_executions shelling_e
ON shelling_e.repetition_id = er.id AND shelling_e.phase_type = 'shelling'
LEFT JOIN public.machine_types mt ON cracking_e.machine_type_id = mt.id
ORDER BY e.experiment_number, er.repetition_number;
-- Grant permissions
GRANT SELECT ON public.experiments_with_all_reps_and_phases TO authenticated;
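-- Example (comment-only): one row per experiment/repetition pair, with each phase's
-- execution flattened into prefixed columns. The filter value is illustrative.
--
--   SELECT experiment_number, repetition_number, soaking_status, cracking_status, machine_type_name
--     FROM public.experiments_with_all_reps_and_phases
--    WHERE repetition_status = 'pending';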
-- Function: Get experiment with all repetitions and phase parameters
-- Returns one row for the given experiment, with its repetitions and their phase
-- executions aggregated into a JSONB array
CREATE OR REPLACE FUNCTION public.get_experiment_with_reps_and_phases(p_experiment_id UUID)
RETURNS TABLE (
experiment_id UUID,
experiment_number INTEGER,
phase_name TEXT,
repetitions JSONB
) AS $$
BEGIN
RETURN QUERY
SELECT
e.id,
e.experiment_number,
ep.name,
COALESCE(
jsonb_agg(
jsonb_build_object(
'repetition_id', er.id,
'repetition_number', er.repetition_number,
'status', er.status,
'scheduled_date', er.scheduled_date,
'soaking', jsonb_build_object(
'scheduled_start', soaking_e.scheduled_start_time,
'actual_start', soaking_e.actual_start_time,
'duration_minutes', soaking_e.soaking_duration_minutes,
'scheduled_end', soaking_e.scheduled_end_time,
'actual_end', soaking_e.actual_end_time,
'status', soaking_e.status
),
'airdrying', jsonb_build_object(
'scheduled_start', airdrying_e.scheduled_start_time,
'actual_start', airdrying_e.actual_start_time,
'duration_minutes', airdrying_e.duration_minutes,
'scheduled_end', airdrying_e.scheduled_end_time,
'actual_end', airdrying_e.actual_end_time,
'status', airdrying_e.status
),
'cracking', jsonb_build_object(
'scheduled_start', cracking_e.scheduled_start_time,
'actual_start', cracking_e.actual_start_time,
'scheduled_end', cracking_e.scheduled_end_time,
'actual_end', cracking_e.actual_end_time,
'machine_type_id', cracking_e.machine_type_id,
'machine_type_name', mt.name,
'status', cracking_e.status
),
'shelling', jsonb_build_object(
'scheduled_start', shelling_e.scheduled_start_time,
'actual_start', shelling_e.actual_start_time,
'scheduled_end', shelling_e.scheduled_end_time,
'actual_end', shelling_e.actual_end_time,
'status', shelling_e.status
)
)
ORDER BY er.repetition_number
            ) FILTER (WHERE er.id IS NOT NULL),  -- skip the all-NULL row produced when an experiment has no repetitions
'[]'::jsonb
) as repetitions
FROM public.experiments e
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.experiment_repetitions er ON er.experiment_id = e.id
LEFT JOIN public.experiment_phase_executions soaking_e
ON soaking_e.repetition_id = er.id AND soaking_e.phase_type = 'soaking'
LEFT JOIN public.experiment_phase_executions airdrying_e
ON airdrying_e.repetition_id = er.id AND airdrying_e.phase_type = 'airdrying'
LEFT JOIN public.experiment_phase_executions cracking_e
ON cracking_e.repetition_id = er.id AND cracking_e.phase_type = 'cracking'
LEFT JOIN public.experiment_phase_executions shelling_e
ON shelling_e.repetition_id = er.id AND shelling_e.phase_type = 'shelling'
LEFT JOIN public.machine_types mt ON cracking_e.machine_type_id = mt.id
WHERE e.id = p_experiment_id
GROUP BY e.id, e.experiment_number, ep.name;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;
-- Grant execute permission
GRANT EXECUTE ON FUNCTION public.get_experiment_with_reps_and_phases(UUID) TO authenticated;
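-- Example call (comment-only; the UUID is a placeholder):
--
--   SELECT experiment_number, phase_name, jsonb_pretty(repetitions)
--     FROM public.get_experiment_with_reps_and_phases('<experiment-uuid>'::uuid);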
-- Update the existing experiments_with_phases view to use the unified table
CREATE OR REPLACE VIEW public.experiments_with_phases AS
SELECT
e.id,
e.experiment_number,
e.reps_required,
e.weight_per_repetition_lbs,
e.results_status,
e.completion_status,
e.phase_id,
e.created_at,
e.updated_at,
e.created_by,
ep.name as phase_name,
ep.description as phase_description,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
er.id as first_repetition_id,
er.repetition_number as first_repetition_number,
soaking_e.id as soaking_id,
soaking_e.scheduled_start_time as soaking_scheduled_start,
soaking_e.actual_start_time as soaking_actual_start,
soaking_e.soaking_duration_minutes,
soaking_e.scheduled_end_time as soaking_scheduled_end,
soaking_e.actual_end_time as soaking_actual_end,
airdrying_e.id as airdrying_id,
airdrying_e.scheduled_start_time as airdrying_scheduled_start,
airdrying_e.actual_start_time as airdrying_actual_start,
airdrying_e.duration_minutes as airdrying_duration,
airdrying_e.scheduled_end_time as airdrying_scheduled_end,
airdrying_e.actual_end_time as airdrying_actual_end,
cracking_e.id as cracking_id,
cracking_e.scheduled_start_time as cracking_scheduled_start,
cracking_e.actual_start_time as cracking_actual_start,
cracking_e.actual_end_time as cracking_actual_end,
mt.name as machine_type_name,
shelling_e.id as shelling_id,
shelling_e.scheduled_start_time as shelling_scheduled_start,
shelling_e.actual_start_time as shelling_actual_start,
shelling_e.actual_end_time as shelling_actual_end
FROM public.experiments e
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN LATERAL (
SELECT id, repetition_number
FROM public.experiment_repetitions
WHERE experiment_id = e.id
ORDER BY repetition_number
LIMIT 1
) er ON true
LEFT JOIN public.experiment_phase_executions soaking_e
ON soaking_e.repetition_id = er.id AND soaking_e.phase_type = 'soaking'
LEFT JOIN public.experiment_phase_executions airdrying_e
ON airdrying_e.repetition_id = er.id AND airdrying_e.phase_type = 'airdrying'
LEFT JOIN public.experiment_phase_executions cracking_e
ON cracking_e.repetition_id = er.id AND cracking_e.phase_type = 'cracking'
LEFT JOIN public.experiment_phase_executions shelling_e
ON shelling_e.repetition_id = er.id AND shelling_e.phase_type = 'shelling'
LEFT JOIN public.machine_types mt ON cracking_e.machine_type_id = mt.id;
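-- Note: the LATERAL subquery pins each experiment to its lowest-numbered repetition,
-- so this view stays one row per experiment (assuming at most one execution per phase
-- type per repetition, which the auto-create trigger guarantees).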
-- Update repetitions_with_phases view to use the unified table
CREATE OR REPLACE VIEW public.repetitions_with_phases AS
SELECT
er.id,
er.experiment_id,
er.repetition_number,
er.status,
er.created_at,
er.updated_at,
er.created_by,
e.experiment_number,
e.phase_id,
e.weight_per_repetition_lbs,
ep.name as phase_name,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
soaking_e.scheduled_start_time as soaking_scheduled_start,
soaking_e.actual_start_time as soaking_actual_start,
soaking_e.soaking_duration_minutes,
soaking_e.scheduled_end_time as soaking_scheduled_end,
soaking_e.actual_end_time as soaking_actual_end,
airdrying_e.scheduled_start_time as airdrying_scheduled_start,
airdrying_e.actual_start_time as airdrying_actual_start,
airdrying_e.duration_minutes as airdrying_duration,
airdrying_e.scheduled_end_time as airdrying_scheduled_end,
airdrying_e.actual_end_time as airdrying_actual_end,
cracking_e.scheduled_start_time as cracking_scheduled_start,
cracking_e.actual_start_time as cracking_actual_start,
cracking_e.actual_end_time as cracking_actual_end,
mt.name as machine_type_name,
shelling_e.scheduled_start_time as shelling_scheduled_start,
shelling_e.actual_start_time as shelling_actual_start,
shelling_e.actual_end_time as shelling_actual_end
FROM public.experiment_repetitions er
JOIN public.experiments e ON er.experiment_id = e.id
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.experiment_phase_executions soaking_e
ON er.id = soaking_e.repetition_id AND soaking_e.phase_type = 'soaking'
LEFT JOIN public.experiment_phase_executions airdrying_e
ON er.id = airdrying_e.repetition_id AND airdrying_e.phase_type = 'airdrying'
LEFT JOIN public.experiment_phase_executions cracking_e
ON er.id = cracking_e.repetition_id AND cracking_e.phase_type = 'cracking'
LEFT JOIN public.experiment_phase_executions shelling_e
ON er.id = shelling_e.repetition_id AND shelling_e.phase_type = 'shelling'
LEFT JOIN public.machine_types mt ON cracking_e.machine_type_id = mt.id;

supabase/seed_01_users.sql (new executable file)

@@ -0,0 +1,600 @@
-- Seed Data for USDA Vision Pecan Experiments System
-- This file populates the database with initial data
-- =============================================
-- 1. INSERT ROLES
-- =============================================
INSERT INTO public.roles (name, description) VALUES
('admin', 'System administrator with full access to all features'),
('conductor', 'Experiment conductor who can manage experiments and view all data'),
('analyst', 'Data analyst who can view and analyze experiment results'),
('data recorder', 'Data entry specialist who can record experiment measurements');
-- =============================================
-- 2. CREATE ADMIN USER
-- =============================================
-- Create admin user in auth.users
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
's.alireza.v@gmail.com',
crypt('admin123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
-- Create user profile
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Alireza', 'Vaezi', 'active'
FROM auth.users
WHERE email = 's.alireza.v@gmail.com'
;
-- Assign admin role
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
up.id
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 's.alireza.v@gmail.com'
AND r.name = 'admin'
;
-- =============================================
-- 3. CREATE ADDITIONAL USERS
-- =============================================
-- Create Claire Floyd (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'Ashlyn.Floyd@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Claire', 'Floyd', 'active'
FROM auth.users
WHERE email = 'Ashlyn.Floyd@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'Ashlyn.Floyd@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Bruna Dos-Santos (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'bkvsantos@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Bruna', 'Dos-Santos', 'active'
FROM auth.users
WHERE email = 'bkvsantos@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'bkvsantos@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Beni Rodriguez (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'Beni.Rodriguez@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Beni', 'Rodriguez', 'active'
FROM auth.users
WHERE email = 'Beni.Rodriguez@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'Beni.Rodriguez@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Brendan Surio (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'Brendan.Surio@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Brendan', 'Surio', 'active'
FROM auth.users
WHERE email = 'Brendan.Surio@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'Brendan.Surio@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create William Mcconnell (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'William.McConnell@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'William', 'Mcconnell', 'active'
FROM auth.users
WHERE email = 'William.McConnell@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'William.McConnell@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Camille Deguzman (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'cpd08598@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Camille', 'Deguzman', 'active'
FROM auth.users
WHERE email = 'cpd08598@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'cpd08598@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Justin Hetzler (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'Justin.Hetzler@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Justin', 'Hetzler', 'active'
FROM auth.users
WHERE email = 'Justin.Hetzler@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'Justin.Hetzler@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Joshua Wilson (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'jdw58940@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Joshua', 'Wilson', 'active'
FROM auth.users
WHERE email = 'jdw58940@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'jdw58940@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create Sydney Orlofsky (Conductor & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'Sydney.Orlofsky@uga.edu',
crypt('password123', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, first_name, last_name, status)
SELECT id, email, 'Sydney', 'Orlofsky', 'active'
FROM auth.users
WHERE email = 'Sydney.Orlofsky@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'Sydney.Orlofsky@uga.edu'
AND r.name IN ('conductor', 'data recorder')
;
-- Create engr-ugaif user (Conductor, Analyst & Data Recorder)
INSERT INTO auth.users (
instance_id,
id,
aud,
role,
email,
encrypted_password,
email_confirmed_at,
created_at,
updated_at,
confirmation_token,
email_change,
email_change_token_new,
recovery_token
) VALUES (
'00000000-0000-0000-0000-000000000000',
uuid_generate_v4(),
'authenticated',
'authenticated',
'engr-ugaif@uga.edu',
crypt('1048lab&2021', gen_salt('bf')),
NOW(),
NOW(),
NOW(),
'',
'',
'',
''
);
INSERT INTO public.user_profiles (id, email, status)
SELECT id, email, 'active'
FROM auth.users
WHERE email = 'engr-ugaif@uga.edu'
;
INSERT INTO public.user_roles (user_id, role_id, assigned_by)
SELECT
up.id,
r.id,
(SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')
FROM public.user_profiles up
CROSS JOIN public.roles r
WHERE up.email = 'engr-ugaif@uga.edu'
AND r.name IN ('conductor', 'analyst', 'data recorder')
;
-- =============================================
-- 4. CREATE MACHINE TYPES
-- =============================================
-- Insert default machine types
INSERT INTO public.machine_types (name, description, created_by) VALUES
('JC Cracker', 'JC Cracker machine with plate contact frequency and throughput parameters', (SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com')),
('Meyer Cracker', 'Meyer Cracker machine with motor speed and jig displacement parameters', (SELECT id FROM public.user_profiles WHERE email = 's.alireza.v@gmail.com'))
ON CONFLICT (name) DO NOTHING;
-- =============================================
-- 5. CREATE EXPERIMENT PHASES
-- =============================================
-- Create "Phase 2 of JC Experiments" phase
INSERT INTO public.experiment_phases (name, description, has_soaking, has_airdrying, has_cracking, has_shelling, cracking_machine_type_id, created_by)
SELECT
'Phase 2 of JC Experiments',
'Second phase of JC Cracker experiments for pecan processing optimization',
true,
true,
true,
false,
(SELECT id FROM public.machine_types WHERE name = 'JC Cracker'),
up.id
FROM public.user_profiles up
WHERE up.email = 's.alireza.v@gmail.com'
;
-- Create "Post Workshop Meyer Experiments" phase
INSERT INTO public.experiment_phases (name, description, has_soaking, has_airdrying, has_cracking, has_shelling, cracking_machine_type_id, created_by)
SELECT
'Post Workshop Meyer Experiments',
'Post workshop Meyer Cracker experiments for pecan processing optimization',
true,
true,
true,
false,
(SELECT id FROM public.machine_types WHERE name = 'Meyer Cracker'),
up.id
FROM public.user_profiles up
WHERE up.email = 's.alireza.v@gmail.com'
;
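-- Verification sketch (comment-only): after seeding, list each user's assigned roles.
--
--   SELECT up.email, array_agg(r.name ORDER BY r.name) AS roles
--     FROM public.user_profiles up
--     JOIN public.user_roles ur ON ur.user_id = up.id
--     JOIN public.roles r ON r.id = ur.role_id
--    GROUP BY up.email
--    ORDER BY up.email;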

File diff suppressed because it is too large.