Remove deprecated files and scripts to streamline the codebase

- Deleted unused API test files, RTSP diagnostic scripts, and development utility scripts to reduce clutter.
- Removed outdated database schema and modularization proposal documents to maintain focus on current architecture.
- Cleaned up configuration files and logging scripts that are no longer in use, enhancing project maintainability.
salirezav
2025-11-02 10:07:59 -05:00
parent f1a9cb0c1e
commit f6a37ca1ba
50 changed files with 7057 additions and 368 deletions

View File

@@ -17,7 +17,7 @@
},
"system": {
"camera_check_interval_seconds": 2,
"log_level": "DEBUG",
"log_level": "WARNING",
"log_file": "usda_vision_system.log",
"api_host": "0.0.0.0",
"api_port": 8000,

View File

@@ -123,7 +123,7 @@ class APIServer:
def _setup_routes(self):
"""Setup API routes"""
# Register routes from modules
register_system_routes(
app=self.app,
@@ -299,7 +299,24 @@ class APIServer:
self._event_loop = asyncio.new_event_loop()
asyncio.set_event_loop(self._event_loop)
uvicorn.run(self.app, host=self.config.system.api_host, port=self.config.system.api_port, log_level="info")
# Map our log level to uvicorn's log level
uvicorn_log_level_map = {
"DEBUG": "debug",
"INFO": "info",
"WARNING": "warning",
"ERROR": "error",
"CRITICAL": "critical"
}
config_log_level = self.config.system.log_level.upper()
uvicorn_log_level = uvicorn_log_level_map.get(config_log_level, "warning")
uvicorn.run(
self.app,
host=self.config.system.api_host,
port=self.config.system.api_port,
log_level=uvicorn_log_level,
access_log=False # Disable access logs (GET, POST, etc.) to reduce noise
)
except Exception as e:
self.logger.error(f"Error running API server: {e}")
finally:

View File

@@ -186,34 +186,48 @@ class CameraMonitor:
# Ensure SDK is initialized
ensure_sdk_initialized()
self.logger.info(f"Attempting to initialize camera {camera_name} for availability test...")
# Suppress output to avoid MVCAMAPI error messages during camera testing
with suppress_camera_errors():
hCamera = mvsdk.CameraInit(device_info, -1, -1)
hCamera = None
try:
with suppress_camera_errors():
hCamera = mvsdk.CameraInit(device_info, -1, -1)
self.logger.info(f"Camera {camera_name} initialized successfully, starting test capture...")
except mvsdk.CameraException as init_e:
self.logger.warning(f"CameraInit failed for {camera_name}: {init_e.message} (error_code: {init_e.error_code})")
return "error", f"Camera initialization failed: {init_e.message}", self._get_device_info_dict(device_info)
# Quick test - try to get one frame
try:
mvsdk.CameraSetTriggerMode(hCamera, 0)
mvsdk.CameraPlay(hCamera)
self.logger.info(f"Camera {camera_name} test: Attempting to capture frame with {CAMERA_TEST_CAPTURE_TIMEOUT}ms timeout...")
# Try to capture with short timeout
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, CAMERA_TEST_CAPTURE_TIMEOUT)
mvsdk.CameraReleaseImageBuffer(hCamera, pRawData)
# Success - camera is available
mvsdk.CameraUnInit(hCamera)
self.logger.info(f"Camera {camera_name} test successful - camera is available")
return "available", "Camera test successful", self._get_device_info_dict(device_info)
except mvsdk.CameraException as e:
mvsdk.CameraUnInit(hCamera)
if e.error_code == mvsdk.CAMERA_STATUS_TIME_OUT:
except mvsdk.CameraException as capture_e:
if hCamera:
mvsdk.CameraUnInit(hCamera)
self.logger.warning(f"Camera {camera_name} capture test failed: {capture_e.message} (error_code: {capture_e.error_code})")
if capture_e.error_code == mvsdk.CAMERA_STATUS_TIME_OUT:
return "available", "Camera available but slow response", self._get_device_info_dict(device_info)
else:
return "error", f"Camera test failed: {e.message}", self._get_device_info_dict(device_info)
return "error", f"Camera test failed: {capture_e.message}", self._get_device_info_dict(device_info)
except mvsdk.CameraException as e:
return "error", f"Camera initialization failed: {e.message}", self._get_device_info_dict(device_info)
self.logger.error(f"CameraException during initialization test for {camera_name}: {e.message} (error_code: {e.error_code})")
return "error", f"Camera initialization failed: {e.message}", self._get_device_info_dict(device_info) if device_info else None
except Exception as e:
self.logger.error(f"Unexpected exception during camera check for {camera_name}: {e}", exc_info=True)
return "error", f"Camera check failed: {str(e)}", None
def _get_device_info_dict(self, device_info) -> Dict[str, Any]:

View File

@@ -96,7 +96,7 @@ class SystemConfig:
"""System-wide configuration"""
camera_check_interval_seconds: int = 2
log_level: str = "INFO"
log_level: str = "WARNING"
log_file: str = "usda_vision_system.log"
api_host: str = "0.0.0.0"
api_port: int = 8000

View File

@@ -112,23 +112,32 @@ class USDAVisionLogger:
def _setup_component_loggers(self) -> None:
"""Setup specific log levels for different components"""
# MQTT client - can be verbose
# MQTT client - reduce INFO logs
mqtt_logger = logging.getLogger('usda_vision_system.mqtt')
if self.log_level == 'DEBUG':
mqtt_logger.setLevel(logging.DEBUG)
else:
elif self.log_level == 'INFO':
mqtt_logger.setLevel(logging.INFO)
else:
mqtt_logger.setLevel(logging.WARNING)
# Camera components - important for debugging
# Camera components - reduce INFO logs, keep WARNING and above
camera_logger = logging.getLogger('usda_vision_system.camera')
camera_logger.setLevel(logging.INFO)
if self.log_level == 'DEBUG':
camera_logger.setLevel(logging.DEBUG)
elif self.log_level == 'INFO':
camera_logger.setLevel(logging.INFO)
else:
camera_logger.setLevel(logging.WARNING)
# API server - can be noisy
# API server - reduce INFO noise
api_logger = logging.getLogger('usda_vision_system.api')
if self.log_level == 'DEBUG':
api_logger.setLevel(logging.DEBUG)
else:
elif self.log_level == 'INFO':
api_logger.setLevel(logging.INFO)
else:
api_logger.setLevel(logging.WARNING)
# Uvicorn - reduce noise unless debugging
uvicorn_logger = logging.getLogger('uvicorn')

View File

@@ -45,8 +45,21 @@ class StandaloneAutoRecorder:
# Setup logging (only if not already configured)
if not logging.getLogger().handlers:
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", handlers=[logging.FileHandler("standalone_auto_recorder.log"), logging.StreamHandler()])
# Use WARNING level by default to reduce INFO log noise
log_level = getattr(self.config.system, 'log_level', 'WARNING')
log_level_num = getattr(logging, log_level.upper(), logging.WARNING)
logging.basicConfig(
level=log_level_num,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[
logging.FileHandler("standalone_auto_recorder.log"),
logging.StreamHandler()
]
)
self.logger = logging.getLogger(__name__)
# Ensure this logger respects the configured log level
if hasattr(self.config, 'system') and hasattr(self.config.system, 'log_level'):
self.logger.setLevel(getattr(logging, self.config.system.log_level.upper(), logging.WARNING))
# Initialize components
self.state_manager = StateManager()
@@ -59,6 +72,9 @@ class StandaloneAutoRecorder:
self.camera_recorders: Dict[str, CameraRecorder] = {}
self.active_recordings: Dict[str, str] = {} # camera_name -> filename
# Camera device cache
self._device_list: Optional[list] = None
# Machine to camera mapping
self.machine_camera_map = self._build_machine_camera_map()
@@ -257,7 +273,7 @@ class StandaloneAutoRecorder:
return None
def _find_camera_device(self, camera_name: str):
"""Simplified camera device discovery"""
"""Find camera device by matching serial number or using index mapping"""
try:
# Import camera SDK
import sys
@@ -266,23 +282,73 @@ class StandaloneAutoRecorder:
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "camera_sdk"))
import mvsdk
# Initialize SDK
mvsdk.CameraSdkInit(1)
# Initialize SDK (only if not already initialized)
try:
mvsdk.CameraSdkInit(1)
except:
pass # SDK may already be initialized
# Enumerate cameras
device_list = mvsdk.CameraEnumerateDevice()
# For now, map by index (camera1 = index 0, camera2 = index 1)
camera_index = int(camera_name.replace("camera", "")) - 1
if 0 <= camera_index < len(device_list):
return device_list[camera_index]
# Cache device list to avoid re-enumerating
if self._device_list is None:
device_list = mvsdk.CameraEnumerateDevice()
self._device_list = device_list
self.logger.info(f"Enumerated {len(device_list)} camera device(s)")
else:
self.logger.error(f"Camera index {camera_index} not found (total: {len(device_list)})")
device_list = self._device_list
if len(device_list) == 0:
self.logger.error("No cameras detected")
return None
# Find camera config to get serial number or device_index if available
camera_config = None
for config in self.config.cameras:
if config.name == camera_name:
camera_config = config
break
# Try to match by serial number if available in device info
if camera_config:
# Check if config has device_index specified
device_index = getattr(camera_config, 'device_index', None)
if device_index is not None and 0 <= device_index < len(device_list):
self.logger.info(f"Using device_index {device_index} for {camera_name}")
return device_list[device_index]
# Try matching by serial number from camera config if available
config_serial = getattr(camera_config, 'serial_number', None)
if config_serial:
for i, dev_info in enumerate(device_list):
try:
dev_serial = getattr(dev_info, 'acSn', None) or getattr(dev_info, 'GetSn', lambda: None)()
if dev_serial and str(dev_serial) == str(config_serial):
self.logger.info(f"Matched {camera_name} to device {i} by serial number: {dev_serial}")
return dev_info
except:
continue
# Fallback to index mapping (camera1 = index 0, camera2 = index 1, etc.)
try:
camera_index = int(camera_name.replace("camera", "")) - 1
if 0 <= camera_index < len(device_list):
self.logger.info(f"Using index mapping for {camera_name} -> device {camera_index}")
return device_list[camera_index]
else:
self.logger.error(f"Camera index {camera_index} not found (total: {len(device_list)}). Available indices: 0-{len(device_list)-1}")
# If only one camera is available, use it for any camera name
if len(device_list) == 1:
self.logger.warning(f"Only 1 camera detected, using it for {camera_name}")
return device_list[0]
except ValueError:
pass
self.logger.error(f"No device found for camera {camera_name}")
return None
except Exception as e:
self.logger.error(f"Error finding camera device: {e}")
import traceback
self.logger.debug(traceback.format_exc())
return None
def start(self) -> bool:

View File

@@ -1,90 +0,0 @@
#!/bin/bash
# USDA Vision Development Logs Script
# This script shows logs from the development environment
set -e
echo "📋 USDA Vision Development Logs"
echo "==============================="
# Check if docker-compose.dev.yml exists
if [ ! -f "docker-compose.dev.yml" ]; then
echo "❌ Error: docker-compose.dev.yml not found!"
echo "Please make sure you're in the project root directory."
exit 1
fi
# Function to show help
show_help() {
echo "Usage: $0 [OPTIONS] [SERVICE]"
echo ""
echo "Options:"
echo " -f, --follow Follow log output (like tail -f)"
echo " -t, --tail N Show last N lines (default: 100)"
echo " -h, --help Show this help message"
echo ""
echo "Services:"
echo " api Show API service logs only"
echo " web Show web service logs only"
echo " (no service) Show logs from all services"
echo ""
echo "Examples:"
echo " $0 # Show last 100 lines from all services"
echo " $0 -f # Follow all logs in real-time"
echo " $0 -f api # Follow API logs only"
echo " $0 -t 50 web # Show last 50 lines from web service"
}
# Default values
FOLLOW=false
TAIL_LINES=100
SERVICE=""
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
-f|--follow)
FOLLOW=true
shift
;;
-t|--tail)
TAIL_LINES="$2"
shift 2
;;
-h|--help)
show_help
exit 0
;;
api|web)
SERVICE="$1"
shift
;;
*)
echo "❌ Unknown option: $1"
show_help
exit 1
;;
esac
done
# Build docker compose command
COMPOSE_CMD="docker compose -f docker-compose.dev.yml logs"
if [ "$FOLLOW" = true ]; then
COMPOSE_CMD="$COMPOSE_CMD -f"
fi
if [ "$TAIL_LINES" != "100" ]; then
COMPOSE_CMD="$COMPOSE_CMD --tail=$TAIL_LINES"
fi
if [ -n "$SERVICE" ]; then
COMPOSE_CMD="$COMPOSE_CMD $SERVICE"
fi
echo "Running: $COMPOSE_CMD"
echo ""
# Execute the command
eval $COMPOSE_CMD

View File

@@ -1,68 +0,0 @@
#!/bin/bash
# USDA Vision Development Shell Script
# This script opens a shell in the running development container
set -e
echo "🐚 USDA Vision Development Shell"
echo "================================"
# Check if docker-compose.dev.yml exists
if [ ! -f "docker-compose.dev.yml" ]; then
echo "❌ Error: docker-compose.dev.yml not found!"
echo "Please make sure you're in the project root directory."
exit 1
fi
# Function to show help
show_help() {
echo "Usage: $0 [SERVICE]"
echo ""
echo "Services:"
echo " api Open shell in API container (default)"
echo " web Open shell in web container"
echo ""
echo "Examples:"
echo " $0 # Open shell in API container"
echo " $0 api # Open shell in API container"
echo " $0 web # Open shell in web container"
}
# Default service
SERVICE="api"
# Parse arguments
while [[ $# -gt 0 ]]; do
case $1 in
-h|--help)
show_help
exit 0
;;
api|web)
SERVICE="$1"
shift
;;
*)
echo "❌ Unknown option: $1"
show_help
exit 1
;;
esac
done
echo "🔍 Checking if $SERVICE container is running..."
# Check if the service is running
if ! docker compose -f docker-compose.dev.yml ps $SERVICE | grep -q "Up"; then
echo "❌ Error: $SERVICE container is not running!"
echo "Please start the development environment first with: ./dev-start.sh"
exit 1
fi
echo "🚀 Opening shell in $SERVICE container..."
echo "💡 Tip: Use 'exit' to return to your host shell"
echo ""
# Execute shell in the container
docker compose -f docker-compose.dev.yml exec $SERVICE /bin/bash

View File

@@ -1,32 +0,0 @@
#!/bin/bash
# USDA Vision Development Startup Script
# This script starts the development environment with proper logging and debugging
set -e
echo "🚀 Starting USDA Vision Development Environment"
echo "=============================================="
# Check if docker-compose.dev.yml exists
if [ ! -f "docker-compose.dev.yml" ]; then
echo "❌ Error: docker-compose.dev.yml not found!"
echo "Please make sure you're in the project root directory."
exit 1
fi
# Check if .env file exists for web app
if [ ! -f "management-dashboard-web-app/.env" ]; then
echo "⚠️ Warning: management-dashboard-web-app/.env not found!"
echo "You may need to create it from .env.example"
echo "Continuing anyway..."
fi
echo "📦 Building and starting development containers..."
echo ""
# Start the development environment
docker compose -f docker-compose.dev.yml up --build
echo ""
echo "🛑 Development environment stopped"

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# USDA Vision Development Stop Script
# This script stops the development environment
set -e
echo "🛑 Stopping USDA Vision Development Environment"
echo "=============================================="
# Check if docker-compose.dev.yml exists
if [ ! -f "docker-compose.dev.yml" ]; then
echo "❌ Error: docker-compose.dev.yml not found!"
echo "Please make sure you're in the project root directory."
exit 1
fi
echo "🔄 Stopping development containers..."
# Stop the development environment
docker compose -f docker-compose.dev.yml down
echo ""
echo "✅ Development environment stopped successfully"
echo ""
echo "💡 Tip: Use './dev-start.sh' to start the development environment again"
echo "💡 Tip: Use './dev-logs.sh' to view logs from the last run"

View File

@@ -1,106 +0,0 @@
services:
api:
build:
context: ./camera-management-api
dockerfile: Dockerfile
working_dir: /app
volumes:
- ./camera-management-api:/app
- /mnt/nfs_share:/mnt/nfs_share
- /etc/localtime:/etc/localtime:ro
- /etc/timezone:/etc/timezone:ro
environment:
- PYTHONUNBUFFERED=1
- LD_LIBRARY_PATH=/usr/local/lib:/lib:/usr/lib
- PYTHONPATH=/app:/app/camera_sdk
- TZ=America/New_York
# Development-specific environment variables
- FLASK_ENV=development
- FLASK_DEBUG=1
- PYTHONDONTWRITEBYTECODE=1
command: >
sh -lc "
apt-get update && apt-get install -y libusb-1.0-0-dev;
# Install camera SDK if not already installed
if [ ! -f /lib/libMVSDK.so ] && [ -f 'camera_sdk/linuxSDK_V2.1.0.49(250108)/install.sh' ]; then
echo 'Installing camera SDK...';
cd 'camera_sdk/linuxSDK_V2.1.0.49(250108)';
chmod +x install.sh;
./install.sh;
cd /app;
echo 'Camera SDK installed successfully';
else
echo 'Camera SDK already installed or install script not found';
fi;
# Install Python dependencies
if [ -f requirements.txt ]; then
pip install --no-cache-dir -r requirements.txt;
else
pip install --no-cache-dir -e .;
fi;
# Start the application in development mode with verbose logging
echo 'Starting API in development mode...';
python main.py --config config.compose.json --debug --verbose
"
network_mode: host
# Keep container running for debugging
stdin_open: true
tty: true
# Add labels for easier identification
labels:
- "com.usda-vision.service=api"
- "com.usda-vision.environment=development"
web:
image: node:20-alpine
working_dir: /app
env_file:
- ./management-dashboard-web-app/.env
volumes:
- ./management-dashboard-web-app:/app
environment:
- CHOKIDAR_USEPOLLING=true
- TZ=America/New_York
# Development-specific environment variables
- NODE_ENV=development
- VITE_DEV_SERVER_HOST=0.0.0.0
- VITE_DEV_SERVER_PORT=8080
command: >
sh -lc "
echo 'Installing dependencies...';
npm ci;
echo 'Starting web development server...';
npm run dev -- --host 0.0.0.0 --port 8080 --verbose
"
# Ensure the web container can resolve host.docker.internal on Linux
extra_hosts:
- "host.docker.internal:host-gateway"
ports:
- "8080:8080"
# Keep container running for debugging
stdin_open: true
tty: true
# Add labels for easier identification
labels:
- "com.usda-vision.service=web"
- "com.usda-vision.environment=development"
# Optional: Add a development database if needed
# db:
# image: postgres:15-alpine
# environment:
# POSTGRES_DB: usda_vision_dev
# POSTGRES_USER: dev
# POSTGRES_PASSWORD: dev
# volumes:
# - postgres_dev_data:/var/lib/postgresql/data
# ports:
# - "5432:5432"
# labels:
# - "com.usda-vision.service=database"
# - "com.usda-vision.environment=development"
# volumes:
# postgres_dev_data:

View File

@@ -85,6 +85,25 @@ services:
ports:
- "3001:3001"
vision-system-remote:
image: node:20-alpine
working_dir: /app
environment:
- CHOKIDAR_USEPOLLING=true
- TZ=America/New_York
- VITE_VISION_API_URL=http://exp-dash:8000
volumes:
- ./vision-system-remote:/app
command: >
sh -lc "
npm install;
npm run dev:watch
"
extra_hosts:
- "host.docker.internal:host-gateway"
ports:
- "3002:3002"
media-api:
build:
context: ./media-api

docs/README.md (new file, 24 lines)
View File

@@ -0,0 +1,24 @@
# Documentation
This directory contains project documentation and reference materials.
## Documentation Files
- **`CODE_QUALITY_IMPROVEMENTS.md`** - Analysis and suggestions for improving code quality in the camera-management-api
- **`MODULARIZATION_PROPOSAL.md`** - Different strategies for modularizing the API, including recommendations
- **`REFACTORING_PLAN.md`** - Step-by-step quick start guide for implementing code quality improvements
- **`REFACTORING_SUMMARY.md`** - Complete summary of the modularization and refactoring work performed (frontend microfrontends + backend improvements)
- **`database_schema.md`** - Database schema documentation
- **`rtsp_access_guide.md`** - Guide for accessing RTSP streams
- **`test_rtsp_working.md`** - Notes about RTSP testing
## Main Documentation
See the root `README.md` for project overview, setup instructions, and quick start guide.

docs/REFACTORING_SUMMARY.md (new file, 358 lines)
View File

@@ -0,0 +1,358 @@
# Code Quality Refactoring Summary
**Date**: November 2025
**Branch**: Modularization branch
**Status**: ✅ Completed and Verified
## Overview
This document summarizes the modularization and code quality refactoring work performed on the USDA Vision system. The work was done in two phases:
1. **Frontend Modularization** (React Dashboard) - Extracted features into microfrontends using Module Federation
2. **Backend Refactoring** (Camera Management API) - Improved code organization within the monolithic architecture
This document focuses primarily on Phase 2 (API refactoring), but provides context about the overall modularization strategy.
## Project Context: Two-Phase Modularization
### Phase 1: Frontend Modularization (React Dashboard)
**Status**: ✅ Completed
Before working on the API, we first modularized the React dashboard application into a microfrontend architecture:
- **Approach**: Used Vite Module Federation to create independently deployable frontend modules
- **First Module Extracted**: `video-remote` - The video library feature was extracted into its own microfrontend
- **Architecture**:
- Main dashboard acts as a "shell" application
- Remotely loads `video-remote` module when enabled via feature flags
- Supports gradual migration (local fallback components remain available)
- **Infrastructure Changes**:
- Created separate `media-api` container for video processing (thumbnails, transcoding)
- Added `mediamtx` container for RTSP/WebRTC streaming
- `video-remote` container runs independently and can be updated separately
- **Benefits Achieved**:
- Independent deployment of video library feature
- Better separation of concerns (media handling separate from main dashboard)
- Foundation for extracting more features (camera management, experiments, etc.)
### Phase 2: Backend Refactoring (Camera Management API)
**Status**: ✅ Completed
After successfully modularizing the frontend, we focused on improving the backend code quality. **Important**: We chose NOT to split the API into microservices, but rather to improve the organization within the existing monolithic architecture.
- **Approach**: Simple, low-risk refactorings within the monolithic structure
- **Philosophy**: "Simplest least destructive code refactorings that can significantly make the code more readable and manageable and editable"
- **Decision**: Keep monolithic architecture (no microservices) but improve internal organization
- **Why Monolithic**:
- Camera SDK and hardware interactions require tight coupling
- System is stable and working well
- Full microservices would be overkill and add complexity
- Focus on code quality over architectural changes
---
## Motivation
The API refactoring was requested to improve code quality and manageability within the existing monolithic architecture. The goal was better internal organization without resorting to a full microservices split or breaking changes, following the pattern established during the frontend modularization.
## Refactoring Tasks Completed
### 1. Extract Duplicate Code (`suppress_camera_errors`)
**Problem**: The `suppress_camera_errors()` context manager was duplicated in three files:
- `camera/recorder.py`
- `camera/streamer.py`
- `camera/monitor.py`
**Solution**:
- Created `camera/utils.py` with the centralized `suppress_camera_errors()` function
- Updated all three files to import from `utils` instead of defining locally
**Files Changed**:
- ✅ Created: `camera-management-api/usda_vision_system/camera/utils.py`
- ✅ Updated: `camera/recorder.py` - removed local definition, added import
- ✅ Updated: `camera/streamer.py` - removed local definition, added import
- ✅ Updated: `camera/monitor.py` - removed local definition, added import
**Benefits**:
- Single source of truth for error suppression logic
- Easier maintenance (fix bugs in one place)
- Consistent behavior across all camera modules
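For reference, a minimal sketch of what such a context manager might look like (assuming it works by temporarily redirecting stderr to /dev/null around SDK calls; the actual implementation in `camera/utils.py` may differ):
```python
import os
import sys
from contextlib import contextmanager

@contextmanager
def suppress_camera_errors():
    """Temporarily silence stderr so MVCAMAPI error messages do not flood the logs."""
    stderr_fd = sys.stderr.fileno()
    saved_fd = os.dup(stderr_fd)          # keep a copy of the real stderr
    devnull_fd = os.open(os.devnull, os.O_WRONLY)
    try:
        os.dup2(devnull_fd, stderr_fd)    # point stderr at /dev/null
        yield
    finally:
        os.dup2(saved_fd, stderr_fd)      # restore the original stderr
        os.close(saved_fd)
        os.close(devnull_fd)
```
Each camera module then imports it (`from .utils import suppress_camera_errors`) instead of defining its own copy.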
---
### 2. Extract Magic Numbers into Constants
**Problem**: Magic numbers scattered throughout camera code made it hard to understand intent and adjust settings:
- Queue sizes (5, 10, 30)
- Frame rates (10.0, 15.0, 30.0)
- Timeouts (200, 1000, 500 milliseconds)
- JPEG quality (70)
- Sleep intervals (0.1 seconds)
**Solution**:
- Created `camera/constants.py` with well-named constants
- Replaced all magic numbers with constant references
**Constants Defined**:
```python
# Queue sizes
MJPEG_QUEUE_MAXSIZE = 5
RTSP_QUEUE_MAXSIZE = 10
RECORDING_QUEUE_MAXSIZE = 30
# Frame rates
PREVIEW_FPS = 10.0
RTSP_FPS = 15.0
DEFAULT_VIDEO_FPS = 30.0
# JPEG quality
PREVIEW_JPEG_QUALITY = 70
# Timeouts (milliseconds)
CAMERA_GET_BUFFER_TIMEOUT = 200
CAMERA_INIT_TIMEOUT = 1000
CAMERA_TEST_CAPTURE_TIMEOUT = 500
# Sleep intervals (seconds)
STREAMING_LOOP_SLEEP = 0.1
BRIEF_PAUSE_SLEEP = 0.1
```
**Files Changed**:
- ✅ Created: `camera-management-api/usda_vision_system/camera/constants.py`
- ✅ Updated: `camera/recorder.py` - replaced magic numbers with constants
- ✅ Updated: `camera/streamer.py` - replaced magic numbers with constants
- ✅ Updated: `camera/manager.py` - replaced magic numbers with constants
- ✅ Updated: `camera/monitor.py` - added import for `CAMERA_TEST_CAPTURE_TIMEOUT`
**Benefits**:
- Self-documenting code (constants explain what values represent)
- Easy to adjust performance settings (change in one place)
- Reduced risk of inconsistent values across modules
- Better code readability
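As a hypothetical before/after sketch (the surrounding call is modeled on the monitor's test capture, not copied verbatim from the repository):
```python
# Before: the meaning of the bare 200 ms timeout is not obvious at the call site
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, 200)

# After: the constant names the intent and is shared across modules
from .constants import CAMERA_GET_BUFFER_TIMEOUT
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(hCamera, CAMERA_GET_BUFFER_TIMEOUT)
```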
---
### 3. Split Monolithic API Routes into Domain Modules
**Problem**: `api/server.py` was 868 lines with all routes defined in a single `_setup_routes()` method, making it:
- Hard to navigate and find specific endpoints
- Difficult to maintain (one large file)
- Prone to merge conflicts
- Not following separation of concerns
**Solution**:
- Created `api/routes/` directory with domain-specific route modules
- Each module exports a `register_*_routes()` function
- Updated `server.py` to import and call these registration functions
**New File Structure**:
```
api/routes/
├── __init__.py # Exports all register functions
├── system_routes.py # /, /health, /system/status, /system/video-module
├── camera_routes.py # /cameras, /cameras/{name}/*, RTSP endpoints
├── recording_routes.py # /cameras/{name}/start-recording, stop-recording
├── mqtt_routes.py # /mqtt/status, /mqtt/events
├── storage_routes.py # /storage/stats, /storage/files, /storage/cleanup
├── auto_recording_routes.py # /cameras/{name}/auto-recording/*
└── recordings_routes.py # /recordings
```
**Files Changed**:
- ✅ Created: `api/routes/__init__.py`
- ✅ Created: `api/routes/system_routes.py` - 7 routes
- ✅ Created: `api/routes/camera_routes.py` - 14 routes
- ✅ Created: `api/routes/recording_routes.py` - 2 routes
- ✅ Created: `api/routes/mqtt_routes.py` - 2 routes
- ✅ Created: `api/routes/storage_routes.py` - 3 routes
- ✅ Created: `api/routes/auto_recording_routes.py` - 3 routes
- ✅ Created: `api/routes/recordings_routes.py` - 1 route
- ✅ Updated: `api/server.py` - reduced from 868 lines to 315 lines (63% reduction)
**Remaining in `server.py`**:
- WebSocket endpoint (`/ws`) - kept here as it's core to the server
- Debug endpoint (`/debug/camera-manager`) - utility endpoint
- Video module route integration - dynamic route inclusion
**Import Path Corrections Made**:
- Fixed all route modules to use correct relative imports:
- `from ...core.config` (three levels up from `api/routes/`)
- `from ..models` (one level up to `api/models`)
- Fixed `AutoRecordingManager` import path (was `auto_recording.manager`, corrected to `recording.auto_manager`)
- Added proper type hints to all registration functions
**Benefits**:
- **63% reduction** in `server.py` size (868 → 315 lines)
- Routes organized by domain (easy to find specific endpoints)
- Easier maintenance (smaller, focused files)
- Reduced merge conflicts (different developers work on different route modules)
- Better code organization following separation of concerns
- Easier to test (can test route modules independently)
---
## Key Design Decisions
### Why Keep WebSocket in `server.py`?
The WebSocket endpoint (`/ws`) was kept in `server.py` because:
- It's tightly coupled with the `WebSocketManager` class defined in `server.py`
- It's core functionality, not a domain-specific feature
- Moving it would require refactoring the manager class as well
### Why Use `register_*_routes()` Functions?
Each route module exports a function that takes dependencies (app, managers, logger) and registers routes. This pattern:
- Keeps route modules testable (can pass mock dependencies)
- Allows `server.py` to control dependency injection
- Makes it clear what dependencies each route module needs
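A minimal sketch of the pattern (the route body and dependency names are illustrative, not copied from the actual modules):
```python
# api/routes/mqtt_routes.py (illustrative sketch)
from fastapi import FastAPI

def register_mqtt_routes(app: FastAPI, mqtt_client, logger) -> None:
    """Attach MQTT-related endpoints to the shared FastAPI app."""

    @app.get("/mqtt/status")
    async def mqtt_status():
        # mqtt_client.is_connected() is an assumed helper, used here only for the sketch
        return {"connected": mqtt_client.is_connected()}
```
`server.py` then calls each registration function from `_setup_routes()`, e.g. `register_mqtt_routes(app=self.app, mqtt_client=self.mqtt_client, logger=self.logger)`, keeping dependency injection in one place.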
### Why Not Move Debug Endpoint?
The `/debug/camera-manager` endpoint could be moved to `camera_routes.py`, but it was kept in `server.py` as a utility endpoint for debugging the server's internal state. This is a reasonable design choice for debug utilities.
---
## Verification
All refactoring changes were verified to work correctly:
**API Starts Successfully**
- No import errors
- No syntax errors
- All route modules load correctly
**Endpoints Function Correctly**
- `/health` - Returns healthy status
- `/system/status` - Returns system status with cameras, machines, recordings
- `/cameras` - Returns camera status (both cameras now show correct status)
- All other endpoints maintain functionality
**No Regressions**
- Camera monitoring works correctly (camera1 shows "available" status)
- Constants are properly imported and used
- Utility functions work as expected
---
## Migration Notes for Future Developers
### Adding New Routes
1. **Identify the domain**: Which route module does your endpoint belong to?
- System/health → `system_routes.py`
- Camera operations → `camera_routes.py`
- Recording → `recording_routes.py`
- Storage → `storage_routes.py`
- MQTT → `mqtt_routes.py`
- Auto-recording → `auto_recording_routes.py`
- Recording sessions → `recordings_routes.py`
2. **Add route to appropriate module**: Use the existing pattern:
```python
@app.get("/your/endpoint")
async def your_endpoint():
# Implementation
```
3. **If creating a new domain**:
- Create new file: `api/routes/your_domain_routes.py`
- Export `register_your_domain_routes()` function
- Add import to `api/routes/__init__.py`
- Register in `server.py`'s `_setup_routes()` method
### Using Constants
When you need camera-related constants:
```python
from ...camera.constants import CAMERA_GET_BUFFER_TIMEOUT, PREVIEW_FPS
```
### Using Utility Functions
When you need camera error suppression:
```python
from ...camera.utils import suppress_camera_errors
with suppress_camera_errors():
# Camera operations
```
---
## Related Documentation
- `CODE_QUALITY_IMPROVEMENTS.md` - Original analysis and suggestions
- `REFACTORING_PLAN.md` - Step-by-step implementation guide
---
## Lessons Learned
1. **Start Small**: The refactoring started with the simplest tasks (extracting duplicates and constants) before tackling the larger route split.
2. **Verify as You Go**: Each task was verified before moving to the next, preventing cascading errors.
3. **Fix Imports Systematically**: When splitting routes, import paths needed careful correction. Using relative imports requires counting directory levels carefully.
4. **Maintain Type Safety**: Added type hints to all route registration functions for better IDE support and error detection.
5. **Test Endpoints**: Always test actual API endpoints after refactoring to ensure no functionality was broken.
---
## Future Improvement Opportunities
While not included in this refactoring, potential future improvements:
1. **Move Debug Endpoint**: Consider moving `/debug/camera-manager` to `camera_routes.py` for better organization
2. **Extract WebSocket Manager**: Could move `WebSocketManager` to a separate module if it grows
3. **Route Unit Tests**: Add unit tests for route modules to prevent regressions
4. **API Documentation**: Consider adding OpenAPI/Swagger tags to organize routes in API docs
5. **More Constants**: Consider extracting more magic numbers as the codebase evolves
---
## Overall Project Status
### Frontend (React Dashboard)
✅ **Microfrontend architecture implemented**
- `video-remote` module extracted and working
- Module Federation configured and tested
- Feature flags system in place for gradual rollout
- Foundation ready for extracting additional modules
### Backend (Camera Management API)
**Monolithic refactoring completed**
- Code organization significantly improved
- Routes split into domain modules
- Constants and utilities extracted
- 63% reduction in main server file size
- **Decision**: Maintained monolithic architecture (not split into microservices)
- All functionality preserved and verified
---
## Summary
### Frontend Modularization
**Microfrontend architecture established**
**Video library extracted as first module**
**Independent deployment pipeline ready**
**Scalable pattern for future feature extraction**
### Backend Refactoring
**3 refactoring tasks completed successfully**
**Code quality significantly improved**
**No functionality broken**
**63% reduction in main server file size**
**Better code organization and maintainability**
**Maintained monolithic architecture (intentional decision)**
### Overall
**Frontend: Microfrontends** (independent modules, Module Federation)
**Backend: Improved Monolith** (better organization, maintainability, no microservices)
The system now has a modular frontend with improved backend code quality, all while maintaining full backward compatibility and system stability.

View File

@@ -1,14 +1,12 @@
# Environment Configuration for Pecan Experiments Application
# Feature Flags
VITE_ENABLE_SHELL=true
VITE_ENABLE_VIDEO_MODULE=true
VITE_ENABLE_VISION_SYSTEM_MODULE=true
# USDA Vision Camera System API Configuration
# Recommended default: use a relative path so the dev server proxy routes to the API container
# Leave unset to default to "/api" (see vite.config.ts proxy)
# To override and point directly, set e.g.:
# VITE_VISION_API_URL=http://vm-host-or-ip:8000
# Remote Module URLs (use exp-dash hostname for Docker Compose networking)
VITE_VIDEO_REMOTE_URL=http://exp-dash:3001/assets/remoteEntry.js?v=$(date +%s)
VITE_VISION_SYSTEM_REMOTE_URL=http://exp-dash:3002/assets/remoteEntry.js?v=$(date +%s)
# Supabase Configuration (if needed for production)
# VITE_SUPABASE_URL=your_supabase_url
# VITE_SUPABASE_ANON_KEY=your_supabase_anon_key
# Development Configuration
# VITE_DEV_MODE=true
# API URLs
VITE_VISION_API_URL=http://exp-dash:8000
VITE_MEDIA_API_URL=http://exp-dash:8090

View File

@@ -5,7 +5,8 @@ import { DashboardHome } from './DashboardHome'
import { UserManagement } from './UserManagement'
import { ExperimentManagement } from './ExperimentManagement'
import { DataEntry } from './DataEntry'
import { VisionSystem } from './VisionSystem'
// VisionSystem is now loaded as a microfrontend - see RemoteVisionSystem below
// import { VisionSystem } from './VisionSystem'
import { Scheduling } from './Scheduling'
import React, { Suspense } from 'react'
import { loadRemoteComponent } from '../lib/loadRemote'
@@ -164,6 +165,13 @@ export function DashboardLayout({ onLogout, currentRoute }: DashboardLayoutProps
LocalVideoPlaceholder as any
) as unknown as React.ComponentType
const LocalVisionSystemPlaceholder = () => (<div className="p-6">Vision System module not enabled.</div>)
const RemoteVisionSystem = loadRemoteComponent(
isFeatureEnabled('enableVisionSystemModule'),
() => import('visionSystemRemote/App'),
LocalVisionSystemPlaceholder as any
) as unknown as React.ComponentType
const renderCurrentView = () => {
if (!user) return null
@@ -200,7 +208,13 @@ export function DashboardLayout({ onLogout, currentRoute }: DashboardLayoutProps
case 'data-entry':
return <DataEntry />
case 'vision-system':
return <VisionSystem />
return (
<ErrorBoundary fallback={<div className="p-6">Failed to load vision system module. Please try again.</div>}>
<Suspense fallback={<div className="p-6">Loading vision system module...</div>}>
<RemoteVisionSystem />
</Suspense>
</ErrorBoundary>
)
case 'scheduling':
return <Scheduling user={user} currentRoute={currentRoute} />
case 'video-library':

View File

@@ -3,6 +3,7 @@ export type FeatureFlags = {
enableVideoModule: boolean
enableExperimentModule: boolean
enableCameraModule: boolean
enableVisionSystemModule: boolean
}
const toBool = (v: unknown, fallback = false): boolean => {
@@ -19,6 +20,7 @@ export const featureFlags: FeatureFlags = {
enableVideoModule: toBool(import.meta.env.VITE_ENABLE_VIDEO_MODULE ?? false),
enableExperimentModule: toBool(import.meta.env.VITE_ENABLE_EXPERIMENT_MODULE ?? false),
enableCameraModule: toBool(import.meta.env.VITE_ENABLE_CAMERA_MODULE ?? false),
enableVisionSystemModule: toBool(import.meta.env.VITE_ENABLE_VISION_SYSTEM_MODULE ?? false),
}
export const isFeatureEnabled = (flag: keyof FeatureFlags): boolean => featureFlags[flag]

View File

@@ -19,7 +19,8 @@ export default defineConfig({
},
remotes: {
// Allow overriding by env; default to localhost for dev
videoRemote: process.env.VITE_VIDEO_REMOTE_URL || 'http://localhost:3001/assets/remoteEntry.js'
videoRemote: process.env.VITE_VIDEO_REMOTE_URL || 'http://localhost:3001/assets/remoteEntry.js',
visionSystemRemote: process.env.VITE_VISION_SYSTEM_REMOTE_URL || 'http://localhost:3002/assets/remoteEntry.js'
},
shared: {
react: { singleton: true, eager: true },

scripts/README.md (new file, 38 lines)
View File

@@ -0,0 +1,38 @@
# Scripts Directory
This directory contains utility scripts, test files, and diagnostic tools for the USDA Vision system.
## Shell Scripts (.sh)
- **`check_rtsp_status.sh`** - Quick RTSP streaming status check
- Starts RTSP stream for camera1
- Checks MediaMTX for stream availability
- Displays access URLs (WebRTC, RTSP)
- **`diagnose_rtsp.sh`** - Comprehensive RTSP diagnostic tool
- Performs full RTSP streaming health check
- Tests API health, camera status, FFmpeg process
- Verifies MediaMTX stream availability
- Provides detailed diagnostic output
## Test Files
- **`api-tests.http`** - REST Client test file for API endpoints
- Camera Management API tests
- Media API tests
- MediaMTX endpoint tests
- Can be used with REST Client extensions in VS Code/Cursor
- **`test_rtsp.py`** - Python script for testing RTSP streaming functionality
## Usage
All scripts assume you're running from the project root directory and that Docker Compose services are running.
### Example:
```bash
# From project root
./scripts/check_rtsp_status.sh
./scripts/diagnose_rtsp.sh
```

View File

@@ -0,0 +1,53 @@
#!/bin/bash
# Docker Compose Reset Script
# This script performs a complete reset of the Docker Compose environment:
# - Stops and removes containers, networks, and volumes
# - Prunes unused Docker resources (containers, images, networks, volumes)
# - Rebuilds and starts all services in detached mode
set -e # Exit on error
echo "=== Docker Compose Reset ==="
echo ""
# Get the project root directory (parent of scripts directory)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(dirname "$SCRIPT_DIR")"
# Change to project root directory
cd "$PROJECT_ROOT"
echo "Working directory: $PROJECT_ROOT"
echo ""
echo "1. Stopping and removing containers, networks, and volumes..."
docker compose down -v
echo ""
echo "2. Pruning unused Docker resources..."
echo " - Pruning unused containers..."
docker container prune -f
echo " - Pruning unused images..."
docker image prune -af
echo " - Pruning unused networks..."
docker network prune -f
echo " - Pruning unused volumes..."
docker volume prune -f
echo ""
echo "3. Rebuilding and starting all services in detached mode..."
docker compose up --build -d
echo ""
echo "4. Checking service status..."
docker compose ps
echo ""
echo "=== Docker Compose Reset Complete ==="
echo ""
echo "All services have been reset and are running in detached mode."
echo "Use 'docker compose logs -f' to view logs or 'docker compose ps' to check status."

View File

@@ -0,0 +1,13 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vision System Remote</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

vision-system-remote/package-lock.json (generated, new file, 3994 lines) — diff suppressed because it is too large

View File

@@ -0,0 +1,31 @@
{
"name": "vision-system-remote",
"private": true,
"version": "0.0.1",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"build:watch": "vite build --watch",
"serve:dist": "serve -s dist -l 3002",
"preview": "vite preview --port 3002",
"dev:watch": "npm run build && (npm run build:watch &) && sleep 1 && npx http-server dist -p 3002 --cors -c-1"
},
"dependencies": {
"react": "^19.1.0",
"react-dom": "^19.1.0"
},
"devDependencies": {
"@originjs/vite-plugin-federation": "^1.3.3",
"@tailwindcss/vite": "^4.1.11",
"@types/react": "^19.2.2",
"@types/react-dom": "^19.2.2",
"@vitejs/plugin-react": "^4.6.0",
"http-server": "^14.1.1",
"serve": "^14.2.3",
"tailwindcss": "^4.1.11",
"typescript": "~5.8.3",
"vite": "^7.0.4"
}
}

View File

@@ -0,0 +1,466 @@
import React, { useState, useEffect, useCallback } from 'react'
import { useWebSocket } from './hooks/useWebSocket'
import { visionApi, type SystemStatus, type CameraStatus, type RecordingInfo } from './services/api'
import { SystemHealthWidget } from './widgets/SystemHealthWidget'
import { MqttStatusWidget } from './widgets/MqttStatusWidget'
import { RecordingsCountWidget } from './widgets/RecordingsCountWidget'
import { CameraCountWidget } from './widgets/CameraCountWidget'
import { CameraCard } from './components/CameraCard'
import { CameraPreviewModal } from './components/CameraPreviewModal'
import { CameraConfigModal } from './components/CameraConfigModal'
// Get WebSocket URL from environment or construct it
const getWebSocketUrl = () => {
const apiUrl = import.meta.env.VITE_VISION_API_URL || '/api'
// If it's a relative path, use relative WebSocket URL
if (apiUrl.startsWith('/')) {
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
return `${protocol}//${window.location.host}${apiUrl.replace(/\/api$/, '')}/ws`
}
// Convert http(s):// to ws(s)://
const wsUrl = apiUrl.replace(/^http/, 'ws')
return `${wsUrl.replace(/\/api$/, '')}/ws`
}
export default function App() {
const [systemStatus, setSystemStatus] = useState<SystemStatus | null>(null)
const [cameras, setCameras] = useState<Record<string, CameraStatus>>({})
const [recordings, setRecordings] = useState<Record<string, RecordingInfo>>({})
const [loading, setLoading] = useState(true)
const [error, setError] = useState<string | null>(null)
const [lastUpdate, setLastUpdate] = useState<Date | null>(null)
const [notification, setNotification] = useState<{ type: 'success' | 'error', message: string } | null>(null)
// Modal states
const [previewModalOpen, setPreviewModalOpen] = useState(false)
const [previewCamera, setPreviewCamera] = useState<string | null>(null)
const [configModalOpen, setConfigModalOpen] = useState(false)
const [selectedCamera, setSelectedCamera] = useState<string | null>(null)
// WebSocket connection
const { isConnected, subscribe } = useWebSocket(getWebSocketUrl())
// Fetch initial data
const fetchInitialData = useCallback(async () => {
try {
setError(null)
const [status, camerasData, recordingsData] = await Promise.all([
visionApi.getSystemStatus(),
visionApi.getCameras(),
visionApi.getRecordings(),
])
setSystemStatus(status)
setCameras(camerasData)
setRecordings(recordingsData)
setLastUpdate(new Date())
} catch (err) {
setError(err instanceof Error ? err.message : 'Failed to fetch data')
console.error('Failed to fetch initial data:', err)
} finally {
setLoading(false)
}
}, [])
// Set up WebSocket subscriptions for real-time updates
useEffect(() => {
const unsubscribeFunctions: Array<() => void> = []
// Subscribe to camera status changes
unsubscribeFunctions.push(
subscribe('camera_status_changed', (event) => {
const { camera_name, status, is_recording } = event.data
setCameras((prev) => ({
...prev,
[camera_name]: {
...prev[camera_name],
status,
is_recording,
last_checked: new Date().toISOString(),
},
}))
setLastUpdate(new Date())
})
)
// Subscribe to recording started events
unsubscribeFunctions.push(
subscribe('recording_started', (event) => {
const { camera_name } = event.data
setCameras((prev) => ({
...prev,
[camera_name]: {
...prev[camera_name],
is_recording: true,
},
}))
// Refresh recordings to get accurate count
visionApi.getRecordings().then(setRecordings).catch(console.error)
// Refresh system status to update counts
visionApi.getSystemStatus().then(setSystemStatus).catch(console.error)
setLastUpdate(new Date())
})
)
// Subscribe to recording stopped events
unsubscribeFunctions.push(
subscribe('recording_stopped', (event) => {
const { camera_name } = event.data
setCameras((prev) => ({
...prev,
[camera_name]: {
...prev[camera_name],
is_recording: false,
},
}))
// Refresh recordings and system status
Promise.all([
visionApi.getRecordings(),
visionApi.getSystemStatus(),
]).then(([recordingsData, statusData]) => {
setRecordings(recordingsData)
setSystemStatus(statusData)
}).catch(console.error)
setLastUpdate(new Date())
})
)
// Subscribe to system status changes
unsubscribeFunctions.push(
subscribe('system_status_changed', () => {
visionApi.getSystemStatus().then(setSystemStatus).catch(console.error)
setLastUpdate(new Date())
})
)
// Subscribe to MQTT status changes
unsubscribeFunctions.push(
subscribe('mqtt_status_changed', () => {
visionApi.getSystemStatus().then(setSystemStatus).catch(console.error)
setLastUpdate(new Date())
})
)
return () => {
unsubscribeFunctions.forEach((unsub) => unsub())
}
}, [subscribe])
// Fetch initial data on mount
useEffect(() => {
fetchInitialData()
}, [fetchInitialData])
// Camera action handlers
const handleStartRecording = useCallback(async (cameraName: string) => {
try {
const timestamp = new Date().toISOString().replace(/[:.]/g, '-')
const filename = `manual_${cameraName}_${timestamp}.mp4`
const result = await visionApi.startRecording(cameraName, filename)
if (result.success) {
setNotification({ type: 'success', message: `Recording started: ${result.filename}` })
// Immediately update state optimistically (UI updates instantly)
setCameras((prev) => ({
...prev,
[cameraName]: {
...prev[cameraName],
is_recording: true,
current_recording_file: result.filename,
},
}))
// Refresh camera status from API as backup (in case WebSocket is delayed)
setTimeout(() => {
visionApi.getCameras().then(setCameras).catch(console.error)
}, 500)
} else {
setNotification({ type: 'error', message: `Failed: ${result.message}` })
}
} catch (err) {
setNotification({ type: 'error', message: err instanceof Error ? err.message : 'Unknown error' })
}
}, [])
const handleStopRecording = useCallback(async (cameraName: string) => {
try {
const result = await visionApi.stopRecording(cameraName)
if (result.success) {
setNotification({ type: 'success', message: 'Recording stopped' })
// Immediately update state optimistically (UI updates instantly)
setCameras((prev) => ({
...prev,
[cameraName]: {
...prev[cameraName],
is_recording: false,
current_recording_file: null,
},
}))
// Refresh camera status from API as backup (in case WebSocket is delayed)
setTimeout(() => {
visionApi.getCameras().then(setCameras).catch(console.error)
}, 500)
} else {
setNotification({ type: 'error', message: `Failed: ${result.message}` })
}
} catch (err) {
setNotification({ type: 'error', message: err instanceof Error ? err.message : 'Unknown error' })
}
}, [])
const handlePreviewModal = useCallback((cameraName: string) => {
setPreviewCamera(cameraName)
setPreviewModalOpen(true)
}, [])
const handlePreviewNewWindow = useCallback((cameraName: string) => {
// Open camera stream in new window/tab
const streamUrl = visionApi.getStreamUrl(cameraName)
window.open(streamUrl, '_blank')
}, [])
const handleConfigure = useCallback((cameraName: string) => {
setSelectedCamera(cameraName)
setConfigModalOpen(true)
}, [])
const handleRestart = useCallback(async (cameraName: string) => {
try {
setNotification({ type: 'success', message: `Restarting camera ${cameraName}...` })
const result = await visionApi.reinitializeCamera(cameraName)
if (result.success) {
setNotification({ type: 'success', message: `Camera ${cameraName} restarted successfully` })
// Refresh camera status
setTimeout(() => {
visionApi.getCameras().then(setCameras).catch(console.error)
visionApi.getSystemStatus().then(setSystemStatus).catch(console.error)
}, 2000) // Wait 2 seconds for camera to reinitialize
} else {
setNotification({ type: 'error', message: `Failed: ${result.message}` })
}
} catch (err) {
setNotification({ type: 'error', message: err instanceof Error ? err.message : 'Unknown error' })
}
}, [])
const handleStopStreaming = useCallback(async (cameraName: string) => {
try {
const result = await visionApi.stopStream(cameraName)
if (result.success) {
setNotification({ type: 'success', message: 'Streaming stopped' })
// Refresh camera status
visionApi.getCameras().then(setCameras).catch(console.error)
} else {
setNotification({ type: 'error', message: `Failed: ${result.message}` })
}
} catch (err) {
setNotification({ type: 'error', message: err instanceof Error ? err.message : 'Unknown error' })
}
}, [])
// Auto-hide notifications
useEffect(() => {
if (notification) {
const timer = setTimeout(() => setNotification(null), 5000)
return () => clearTimeout(timer)
}
}, [notification])
if (loading) {
return (
<div className="p-6">
<div className="flex items-center justify-center h-64">
<div className="text-center">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-indigo-600 mx-auto" />
<p className="mt-4 text-gray-600">Loading vision system...</p>
</div>
</div>
</div>
)
}
if (error) {
return (
<div className="p-6">
<div className="bg-red-50 border border-red-200 rounded-md p-4">
<div className="flex">
<div className="flex-shrink-0">
<svg className="h-5 w-5 text-red-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clipRule="evenodd" />
</svg>
</div>
<div className="ml-3">
<h3 className="text-sm font-medium text-red-800">Error loading vision system</h3>
<div className="mt-2 text-sm text-red-700">
<p>{error}</p>
</div>
<div className="mt-4">
<button
onClick={fetchInitialData}
className="bg-red-100 px-3 py-2 rounded-md text-sm font-medium text-red-800 hover:bg-red-200"
>
Retry
</button>
</div>
</div>
</div>
</div>
</div>
)
}
const cameraCount = Object.keys(cameras).length
const machineCount = systemStatus ? Object.keys(systemStatus.machines).length : 0
const activeRecordings = systemStatus?.active_recordings ?? 0
// Fix: Use recordings object length instead of total_recordings (which may be incorrect)
const totalRecordings = Object.keys(recordings).length
return (
<div className="p-6 space-y-6">
{/* Header */}
<div className="flex items-center justify-between">
<div>
<h1 className="text-3xl font-bold text-gray-900">Vision System</h1>
<p className="mt-2 text-gray-600">Monitor cameras, machines, and recording status</p>
{lastUpdate && (
<p className="mt-1 text-sm text-gray-500 flex items-center space-x-2">
<span>Last updated: {lastUpdate.toLocaleTimeString()}</span>
{isConnected ? (
<span className="inline-flex items-center px-2 py-0.5 rounded-full text-xs font-medium bg-green-100 text-green-800">
<span className="w-2 h-2 bg-green-500 rounded-full mr-1 animate-pulse" />
Live Updates
</span>
) : (
<span className="inline-flex items-center px-2 py-0.5 rounded-full text-xs font-medium bg-yellow-100 text-yellow-800">
Polling Mode
</span>
)}
</p>
)}
</div>
</div>
{/* Status Widgets */}
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-4 gap-6">
<SystemHealthWidget systemStatus={systemStatus} />
<MqttStatusWidget systemStatus={systemStatus} />
<RecordingsCountWidget active={activeRecordings} total={totalRecordings} />
<CameraCountWidget cameraCount={cameraCount} machineCount={machineCount} />
</div>
{/* Cameras Grid */}
<div className="bg-white shadow rounded-lg">
<div className="px-4 py-5 sm:px-6 border-b border-gray-200">
<h3 className="text-lg leading-6 font-medium text-gray-900">Cameras</h3>
<p className="mt-1 max-w-2xl text-sm text-gray-500">
Current status of all cameras in the system
</p>
</div>
<div className="p-6">
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6">
{Object.entries(cameras).map(([cameraName, camera]) => (
<CameraCard
key={cameraName}
cameraName={cameraName}
camera={camera}
onStartRecording={handleStartRecording}
onStopRecording={handleStopRecording}
onPreviewModal={handlePreviewModal}
onPreviewNewWindow={handlePreviewNewWindow}
onStopStreaming={handleStopStreaming}
onConfigure={handleConfigure}
onRestart={handleRestart}
/>
))}
</div>
</div>
</div>
{/* Notification */}
{notification && (
<div
className={`fixed top-4 right-4 z-[999999] p-4 rounded-md shadow-lg ${
notification.type === 'success'
? 'bg-green-50 border border-green-200 text-green-800'
: 'bg-red-50 border border-red-200 text-red-800'
}`}
>
<div className="flex items-center">
<div className="flex-shrink-0">
{notification.type === 'success' ? (
<svg className="h-5 w-5 text-green-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zm3.707-9.293a1 1 0 00-1.414-1.414L9 10.586 7.707 9.293a1 1 0 00-1.414 1.414l2 2a1 1 0 001.414 0l4-4z" clipRule="evenodd" />
</svg>
) : (
<svg className="h-5 w-5 text-red-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clipRule="evenodd" />
</svg>
)}
</div>
<div className="ml-3">
<p className="text-sm font-medium">{notification.message}</p>
</div>
<div className="ml-auto pl-3">
<button
onClick={() => setNotification(null)}
className={`inline-flex rounded-md p-1.5 focus:outline-none focus:ring-2 focus:ring-offset-2 ${
notification.type === 'success'
? 'text-green-500 hover:bg-green-100 focus:ring-green-600'
: 'text-red-500 hover:bg-red-100 focus:ring-red-600'
}`}
>
<svg className="h-4 w-4" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M4.293 4.293a1 1 0 011.414 0L10 8.586l4.293-4.293a1 1 0 111.414 1.414L11.414 10l4.293 4.293a1 1 0 01-1.414 1.414L10 11.414l-4.293 4.293a1 1 0 01-1.414-1.414L8.586 10 4.293 5.707a1 1 0 010-1.414z" clipRule="evenodd" />
</svg>
</button>
</div>
</div>
</div>
)}
{/* Camera Preview Modal */}
{previewCamera && (
<CameraPreviewModal
cameraName={previewCamera}
isOpen={previewModalOpen}
onClose={() => {
setPreviewModalOpen(false)
setPreviewCamera(null)
}}
/>
)}
{/* Camera Configuration Modal */}
{selectedCamera && (
<CameraConfigModal
cameraName={selectedCamera}
isOpen={configModalOpen}
onClose={() => {
setConfigModalOpen(false)
setSelectedCamera(null)
}}
onSuccess={(message) => {
setNotification({ type: 'success', message })
// Refresh camera status
visionApi.getCameras().then(setCameras).catch(console.error)
}}
onError={(error) => {
setNotification({ type: 'error', message: error })
}}
/>
)}
</div>
)
}

View File

@@ -0,0 +1,245 @@
import React from 'react'
import type { CameraStatus } from '../services/api'
interface CameraCardProps {
cameraName: string
camera: CameraStatus
onStartRecording: (cameraName: string) => void
onStopRecording: (cameraName: string) => void
onPreviewModal: (cameraName: string) => void
onPreviewNewWindow: (cameraName: string) => void
onStopStreaming: (cameraName: string) => void
onConfigure: (cameraName: string) => void
onRestart: (cameraName: string) => void
}
export const CameraCard: React.FC<CameraCardProps> = ({
cameraName,
camera,
onStartRecording,
onStopRecording,
onPreviewModal,
onPreviewNewWindow,
onStopStreaming,
onConfigure,
onRestart,
}) => {
const friendlyName = camera.device_info?.friendly_name || cameraName
const isConnected = camera.status === 'available' || camera.status === 'connected' || camera.status === 'streaming'
const hasError = camera.status === 'error'
const isStreaming = camera.status === 'streaming'
const isRecording = camera.is_recording
const needsRestart = hasError || camera.status === 'crashed' || camera.status === 'failed'
const getStatusColor = () => {
if (isRecording) return 'bg-red-500'
if (isStreaming) return 'bg-blue-500'
if (isConnected) return 'bg-green-500'
if (hasError) return 'bg-yellow-500'
return 'bg-gray-400'
}
const getStatusText = () => {
if (isRecording) return 'Recording'
if (isStreaming) return 'Streaming'
if (isConnected) return 'Connected'
if (hasError) return 'Error'
return 'Offline'
}
return (
<div className="bg-white rounded-xl shadow-md hover:shadow-lg transition-shadow duration-200 border border-gray-200 overflow-hidden">
{/* Header with Status Indicator */}
<div className="px-6 py-4 border-b border-gray-200 bg-gradient-to-r from-gray-50 to-white">
<div className="flex items-center justify-between">
<div className="flex items-center space-x-3">
<div className={`w-3 h-3 rounded-full ${getStatusColor()} ${isRecording || isStreaming ? 'animate-pulse' : ''}`} />
<div>
<h3 className="text-lg font-semibold text-gray-900">{friendlyName}</h3>
{friendlyName !== cameraName && (
<p className="text-xs text-gray-500 font-mono">{cameraName}</p>
)}
</div>
</div>
<span
className={`inline-flex items-center px-3 py-1 rounded-full text-xs font-medium ${
isRecording
? 'bg-red-100 text-red-800'
: isStreaming
? 'bg-blue-100 text-blue-800'
: isConnected
? 'bg-green-100 text-green-800'
: hasError
? 'bg-yellow-100 text-yellow-800'
: 'bg-gray-100 text-gray-800'
}`}
>
{getStatusText()}
</span>
</div>
</div>
{/* Camera Details */}
<div className="px-6 py-4 space-y-3">
{/* Status Info */}
<div className="grid grid-cols-2 gap-3 text-sm">
{camera.device_info?.serial_number && (
<div>
<span className="text-gray-500">Serial:</span>
<span className="ml-2 font-mono text-xs text-gray-900">
{camera.device_info.serial_number}
</span>
</div>
)}
{camera.frame_rate && (
<div>
<span className="text-gray-500">FPS:</span>
<span className="ml-2 font-semibold text-gray-900">{camera.frame_rate.toFixed(1)}</span>
</div>
)}
</div>
{/* Recording Indicator */}
{isRecording && (
<div className="flex items-center space-x-2 px-3 py-2 bg-red-50 border border-red-200 rounded-lg">
<div className="w-2 h-2 bg-red-500 rounded-full animate-pulse" />
<span className="text-sm font-medium text-red-800">Recording Active</span>
{camera.current_recording_file && (
<span className="text-xs text-red-600 truncate ml-auto">
{camera.current_recording_file.split('/').pop()}
</span>
)}
</div>
)}
{/* Error Display */}
{camera.last_error && (
<div className="px-3 py-2 bg-yellow-50 border border-yellow-200 rounded-lg">
<p className="text-xs text-yellow-800">
<strong>Error:</strong> {camera.last_error}
</p>
</div>
)}
{/* Action Buttons */}
<div className="pt-3 space-y-2 border-t border-gray-200">
{/* Recording Controls */}
<div className="flex space-x-2">
{!isRecording ? (
<button
onClick={() => onStartRecording(cameraName)}
disabled={!isConnected}
className={`flex-1 px-4 py-2 text-sm font-medium rounded-lg transition-colors ${
isConnected
? 'bg-green-600 text-white hover:bg-green-700 focus:outline-none focus:ring-2 focus:ring-green-500 focus:ring-offset-2'
: 'bg-gray-100 text-gray-400 cursor-not-allowed'
}`}
>
<span className="flex items-center justify-center">
<svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 10l4.553-2.276A1 1 0 0121 8.618v6.764a1 1 0 01-1.447.894L15 14M5 18h8a2 2 0 002-2V8a2 2 0 00-2-2H5a2 2 0 00-2 2v8a2 2 0 002 2z" />
</svg>
Record
</span>
</button>
) : (
<button
onClick={() => onStopRecording(cameraName)}
className="flex-1 px-4 py-2 text-sm font-medium text-white bg-red-600 rounded-lg hover:bg-red-700 focus:outline-none focus:ring-2 focus:ring-red-500 focus:ring-offset-2 transition-colors"
>
<span className="flex items-center justify-center">
<svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 9h6v6H9z" />
</svg>
Stop
</span>
</button>
)}
</div>
{/* Preview and Stream Controls */}
<div className="flex space-x-2">
<button
onClick={() => onPreviewModal(cameraName)}
disabled={!isConnected}
className={`flex-1 px-4 py-2 text-sm font-medium rounded-lg transition-colors ${
isConnected
? 'bg-blue-600 text-white hover:bg-blue-700 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2'
: 'bg-gray-100 text-gray-400 cursor-not-allowed'
}`}
>
<span className="flex items-center justify-center">
<svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 10l4.553-2.276A1 1 0 0121 8.618v6.764a1 1 0 01-1.447.894L15 14M5 18h8a2 2 0 002-2V8a2 2 0 00-2-2H5a2 2 0 00-2 2v8a2 2 0 002 2z" />
</svg>
Preview
</span>
</button>
<button
onClick={() => onPreviewNewWindow(cameraName)}
disabled={!isConnected}
className={`px-3 py-2 text-sm font-medium rounded-lg transition-colors ${
isConnected
? 'bg-blue-500 text-white hover:bg-blue-600 focus:outline-none focus:ring-2 focus:ring-blue-500 focus:ring-offset-2'
: 'bg-gray-100 text-gray-400 cursor-not-allowed'
}`}
title="Open preview in new window"
>
<svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M10 6H6a2 2 0 00-2 2v10a2 2 0 002 2h10a2 2 0 002-2v-4M14 4h6m0 0v6m0-6L10 14" />
</svg>
</button>
{isStreaming && (
<button
onClick={() => onStopStreaming(cameraName)}
className="px-3 py-2 text-sm font-medium text-white bg-orange-600 rounded-lg hover:bg-orange-700 focus:outline-none focus:ring-2 focus:ring-orange-500 focus:ring-offset-2 transition-colors"
title="Stop streaming"
>
<svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
</svg>
</button>
)}
</div>
{/* Error Recovery - Restart Button */}
{needsRestart && (
<div className="mt-2 pt-2 border-t border-gray-200">
<button
onClick={() => onRestart(cameraName)}
className="w-full px-4 py-2 text-sm font-medium text-white bg-orange-600 rounded-lg hover:bg-orange-700 focus:outline-none focus:ring-2 focus:ring-orange-500 focus:ring-offset-2 transition-colors"
>
<span className="flex items-center justify-center">
<svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15" />
</svg>
Restart Camera
</span>
</button>
</div>
)}
{/* Configuration Button */}
<div className="mt-3 pt-3 border-t border-gray-200">
<button
onClick={() => onConfigure(cameraName)}
className="w-full px-4 py-2 text-sm font-medium text-indigo-600 bg-indigo-50 rounded-lg hover:bg-indigo-100 focus:outline-none focus:ring-2 focus:ring-indigo-500 focus:ring-offset-2 transition-colors"
>
<span className="flex items-center justify-center">
<svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z" />
<path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 12a3 3 0 11-6 0 3 3 0 016 0z" />
</svg>
Configure
</span>
</button>
</div>
</div>
</div>
</div>
)
}

View File

@@ -0,0 +1,768 @@
import React, { useState, useEffect } from 'react'
import { visionApi, type CameraConfig, type CameraConfigUpdate } from '../services/api'
interface CameraConfigModalProps {
cameraName: string
isOpen: boolean
onClose: () => void
onSuccess?: (message: string) => void
onError?: (error: string) => void
}
export const CameraConfigModal: React.FC<CameraConfigModalProps> = ({ cameraName, isOpen, onClose, onSuccess, onError }) => {
const [config, setConfig] = useState<CameraConfig | null>(null)
const [loading, setLoading] = useState(false)
const [saving, setSaving] = useState(false)
const [error, setError] = useState<string | null>(null)
const [hasChanges, setHasChanges] = useState(false)
const [originalConfig, setOriginalConfig] = useState<CameraConfig | null>(null)
useEffect(() => {
if (isOpen && cameraName) {
loadConfig()
}
}, [isOpen, cameraName])
const loadConfig = async () => {
try {
setLoading(true)
setError(null)
const configData = await visionApi.getCameraConfig(cameraName)
// Map API field names to UI expected field names
const configWithDefaults = {
...configData,
// Map auto_start_recording_enabled from API to auto_record_on_machine_start for UI
auto_record_on_machine_start: configData.auto_start_recording_enabled ?? false,
}
setConfig(configWithDefaults as CameraConfig)
setOriginalConfig(configWithDefaults as CameraConfig)
setHasChanges(false)
} catch (err) {
let errorMessage = 'Failed to load camera configuration'
if (err instanceof Error) {
errorMessage = err.message
// Handle specific API validation errors for missing video format fields
if (err.message.includes('video_format') || err.message.includes('video_codec') || err.message.includes('video_quality')) {
errorMessage = 'Camera configuration is missing video format settings. This may indicate the backend needs to be updated to support MP4 format. Using default values.'
// Create a default configuration for display
const defaultConfig = {
name: cameraName,
machine_topic: '',
storage_path: '',
enabled: true,
auto_record_on_machine_start: false,
auto_start_recording_enabled: false,
auto_recording_max_retries: 3,
auto_recording_retry_delay_seconds: 2,
exposure_ms: 1.0,
gain: 3.5,
target_fps: 0,
video_format: 'mp4',
video_codec: 'mp4v',
video_quality: 95,
sharpness: 120,
contrast: 110,
saturation: 100,
gamma: 100,
noise_filter_enabled: true,
denoise_3d_enabled: false,
auto_white_balance: true,
color_temperature_preset: 0,
wb_red_gain: 1.0,
wb_green_gain: 1.0,
wb_blue_gain: 1.0,
anti_flicker_enabled: true,
light_frequency: 1,
bit_depth: 8,
hdr_enabled: false,
hdr_gain_mode: 0,
}
setConfig(defaultConfig)
setOriginalConfig(defaultConfig)
setHasChanges(false)
setError(errorMessage)
return
}
}
setError(errorMessage)
onError?.(errorMessage)
} finally {
setLoading(false)
}
}
const updateSetting = (key: keyof CameraConfigUpdate, value: number | boolean | string) => {
if (!config) return
const newConfig = { ...config, [key]: value }
setConfig(newConfig)
// Check if there are changes from original
const hasChanges = originalConfig && Object.keys(newConfig).some(k => {
const configKey = k as keyof CameraConfig
return newConfig[configKey] !== originalConfig[configKey]
})
setHasChanges(!!hasChanges)
// Video format settings are read-only, no validation needed
}
const saveConfig = async () => {
if (!config || !originalConfig) return
try {
setSaving(true)
setError(null)
// Build update object with only changed values
const updates: CameraConfigUpdate = {}
// All user-editable fields that can be sent to the API (manual white-balance gains included)
const configKeys: (keyof CameraConfigUpdate)[] = [
'exposure_ms', 'gain', 'target_fps', 'sharpness', 'contrast', 'saturation',
'gamma', 'noise_filter_enabled', 'denoise_3d_enabled', 'auto_white_balance',
'color_temperature_preset', 'wb_red_gain', 'wb_green_gain', 'wb_blue_gain',
'anti_flicker_enabled', 'light_frequency', 'hdr_enabled', 'hdr_gain_mode',
'auto_record_on_machine_start', 'auto_start_recording_enabled',
'auto_recording_max_retries', 'auto_recording_retry_delay_seconds'
]
configKeys.forEach(key => {
if (config[key] !== originalConfig[key]) {
// Map auto_record_on_machine_start back to auto_start_recording_enabled for API
if (key === 'auto_record_on_machine_start') {
updates.auto_start_recording_enabled = config[key] as boolean
} else {
updates[key] = config[key] as any
}
}
})
// Remove auto_record_on_machine_start if it was added, as it's not an API field
if ('auto_record_on_machine_start' in updates) {
delete updates.auto_record_on_machine_start
}
if (Object.keys(updates).length === 0) {
onSuccess?.('No changes to save')
return
}
const result = await visionApi.updateCameraConfig(cameraName, updates)
if (result.success) {
setOriginalConfig(config)
setHasChanges(false)
onSuccess?.(`Configuration updated: ${result.updated_settings.join(', ')}`)
} else {
throw new Error(result.message)
}
} catch (err) {
const errorMessage = err instanceof Error ? err.message : 'Failed to save configuration'
setError(errorMessage)
onError?.(errorMessage)
} finally {
setSaving(false)
}
}
const resetChanges = () => {
if (originalConfig) {
setConfig(originalConfig)
setHasChanges(false)
}
}
if (!isOpen) return null
return (
<div className="fixed inset-0 flex items-center justify-center overflow-y-auto modal z-999999">
<div
className="fixed inset-0 h-full w-full bg-gray-400/50 backdrop-blur-[2px]"
onClick={onClose}
/>
<div className="relative w-full rounded-2xl bg-white shadow-theme-xl dark:bg-gray-900 max-w-4xl mx-4 max-h-[90vh] overflow-hidden" onClick={(e) => e.stopPropagation()}>
{/* Close Button */}
<button
onClick={onClose}
className="absolute right-3 top-3 z-999 flex h-9.5 w-9.5 items-center justify-center rounded-full bg-gray-100 text-gray-400 transition-colors hover:bg-gray-200 hover:text-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700 dark:hover:text-white sm:right-6 sm:top-6 sm:h-11 sm:w-11"
>
<svg
width="24"
height="24"
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
>
<path
fillRule="evenodd"
clipRule="evenodd"
d="M6.04289 16.5413C5.65237 16.9318 5.65237 17.565 6.04289 17.9555C6.43342 18.346 7.06658 18.346 7.45711 17.9555L11.9987 13.4139L16.5408 17.956C16.9313 18.3466 17.5645 18.3466 17.955 17.956C18.3455 17.5655 18.3455 16.9323 17.955 16.5418L13.4129 11.9997L17.955 7.4576C18.3455 7.06707 18.3455 6.43391 17.955 6.04338C17.5645 5.65286 16.9313 5.65286 16.5408 6.04338L11.9987 10.5855L7.45711 6.0439C7.06658 5.65338 6.43342 5.65338 6.04289 6.0439C5.65237 6.43442 5.65237 7.06759 6.04289 7.45811L10.5845 11.9997L6.04289 16.5413Z"
fill="currentColor"
/>
</svg>
</button>
{/* Header */}
<div className="px-6 py-4 border-b border-gray-200 dark:border-gray-800">
<div className="flex items-center justify-between">
<h3 className="text-lg font-medium text-gray-900 dark:text-white/90">
Camera Configuration - {cameraName}
</h3>
</div>
</div>
{/* Content */}
<div className="px-6 py-4 overflow-y-auto max-h-[calc(90vh-140px)]">
{loading && (
<div className="flex items-center justify-center py-8">
<div className="animate-spin rounded-full h-8 w-8 border-b-2 border-indigo-600"></div>
<span className="ml-2 text-gray-600">Loading configuration...</span>
</div>
)}
{error && (
<div className="mb-4 p-4 bg-red-50 border border-red-200 rounded-md">
<div className="flex">
<div className="flex-shrink-0">
<svg className="h-5 w-5 text-red-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clipRule="evenodd" />
</svg>
</div>
<div className="ml-3">
<h3 className="text-sm font-medium text-red-800">Configuration Error</h3>
<p className="mt-2 text-sm text-red-700">{error}</p>
{error.includes('video_format') && (
<p className="mt-2 text-sm text-red-600">
<strong>Note:</strong> The video format settings are displayed with default values.
You can still modify and save the configuration, but the backend may need to be updated
to fully support MP4 format settings.
</p>
)}
</div>
</div>
</div>
)}
{config && !loading && (
<div className="space-y-6">
{/* System Information (Read-Only) */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">System Information</h4>
<div className="bg-gray-50 border border-gray-200 rounded-md p-4">
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Camera Name</label>
<div className="text-sm text-gray-900 font-medium">{config.name}</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Machine Topic</label>
<div className="text-sm text-gray-900 font-medium">{config.machine_topic}</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Storage Path</label>
<div className="text-sm text-gray-900 font-medium">{config.storage_path}</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Status</label>
<div className="text-sm text-gray-900 font-medium">
<span className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${config.enabled ? 'bg-green-100 text-green-800' : 'bg-red-100 text-red-800'
}`}>
{config.enabled ? 'Enabled' : 'Disabled'}
</span>
</div>
</div>
</div>
</div>
</div>
{/* Auto-Recording Settings (Read-Only) */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Auto-Recording Settings</h4>
<div className="bg-gray-50 border border-gray-200 rounded-md p-4">
<div className="grid grid-cols-1 md:grid-cols-3 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Auto Recording</label>
<div className="text-sm text-gray-900 font-medium">
<span className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${config.auto_start_recording_enabled ? 'bg-green-100 text-green-800' : 'bg-red-100 text-red-800'
}`}>
{config.auto_start_recording_enabled ? 'Enabled' : 'Disabled'}
</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Max Retries</label>
<div className="text-sm text-gray-900 font-medium">{config.auto_recording_max_retries}</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">Retry Delay</label>
<div className="text-sm text-gray-900 font-medium">{config.auto_recording_retry_delay_seconds}s</div>
</div>
</div>
<p className="text-xs text-gray-500 mt-3">Auto-recording settings are configured in the system configuration file</p>
</div>
</div>
{/* Basic Settings */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Basic Settings</h4>
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Exposure (ms): {config.exposure_ms}
</label>
<input
type="range"
min="0.1"
max="10"
step="0.1"
value={config.exposure_ms}
onChange={(e) => updateSetting('exposure_ms', parseFloat(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0.1ms</span>
<span>10ms</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Gain: {config.gain}
</label>
<input
type="range"
min="0"
max="10"
step="0.1"
value={config.gain}
onChange={(e) => updateSetting('gain', parseFloat(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>10</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Target FPS: {config.target_fps} {config.target_fps === 0 ? '(Maximum)' : ''}
</label>
<input
type="range"
min="0"
max="30"
step="1"
value={config.target_fps}
onChange={(e) => updateSetting('target_fps', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0 (Max)</span>
<span>30</span>
</div>
</div>
</div>
</div>
{/* Image Quality Settings */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Image Quality</h4>
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Sharpness: {config.sharpness}
</label>
<input
type="range"
min="0"
max="200"
value={config.sharpness}
onChange={(e) => updateSetting('sharpness', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>200</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Contrast: {config.contrast}
</label>
<input
type="range"
min="0"
max="200"
value={config.contrast}
onChange={(e) => updateSetting('contrast', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>200</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Saturation: {config.saturation}
</label>
<input
type="range"
min="0"
max="200"
value={config.saturation}
onChange={(e) => updateSetting('saturation', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>200</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Gamma: {config.gamma}
</label>
<input
type="range"
min="0"
max="300"
value={config.gamma}
onChange={(e) => updateSetting('gamma', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>300</span>
</div>
</div>
</div>
</div>
{/* Color Settings */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Color Settings</h4>
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="flex items-center space-x-2">
<input
type="checkbox"
checked={config.auto_white_balance}
onChange={(e) => updateSetting('auto_white_balance', e.target.checked)}
className="rounded border-gray-300 text-indigo-600 focus:ring-indigo-500"
/>
<span className="text-sm font-medium text-gray-700">Auto White Balance</span>
</label>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Color Temperature Preset: {config.color_temperature_preset} {config.color_temperature_preset === 0 ? '(Auto)' : ''}
</label>
<input
type="range"
min="0"
max="10"
value={config.color_temperature_preset}
onChange={(e) => updateSetting('color_temperature_preset', parseInt(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0 (Auto)</span>
<span>10</span>
</div>
</div>
</div>
</div>
{/* White Balance RGB Gains */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">White Balance RGB Gains</h4>
<div className="grid grid-cols-1 md:grid-cols-3 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Red Gain: {config.wb_red_gain?.toFixed(2) || '1.00'}
</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_red_gain || 1.0}
onChange={(e) => updateSetting('wb_red_gain', parseFloat(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0.00</span>
<span>3.99</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Green Gain: {config.wb_green_gain?.toFixed(2) || '1.00'}
</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_green_gain || 1.0}
onChange={(e) => updateSetting('wb_green_gain', parseFloat(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0.00</span>
<span>3.99</span>
</div>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Blue Gain: {config.wb_blue_gain?.toFixed(2) || '1.00'}
</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_blue_gain || 1.0}
onChange={(e) => updateSetting('wb_blue_gain', parseFloat(e.target.value))}
className="w-full"
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0.00</span>
<span>3.99</span>
</div>
</div>
</div>
<p className="text-xs text-gray-500 mt-2">Manual white balance gains (only effective when Auto White Balance is disabled)</p>
</div>
{/* Advanced Settings */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Advanced Settings</h4>
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="flex items-center space-x-2">
<input
type="checkbox"
checked={config.anti_flicker_enabled}
onChange={(e) => updateSetting('anti_flicker_enabled', e.target.checked)}
className="rounded border-gray-300 text-indigo-600 focus:ring-indigo-500"
/>
<span className="text-sm font-medium text-gray-700">Anti-flicker Enabled</span>
</label>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
Light Frequency: {config.light_frequency === 0 ? '50Hz' : '60Hz'}
</label>
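{/* light_frequency is the API enum value: 0 = 50Hz, 1 = 60Hz */}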
<select
value={config.light_frequency}
onChange={(e) => updateSetting('light_frequency', parseInt(e.target.value))}
className="w-full border-gray-300 rounded-md focus:ring-indigo-500 focus:border-indigo-500"
>
<option value={0}>50Hz</option>
<option value={1}>60Hz</option>
</select>
</div>
<div>
<label className="flex items-center space-x-2">
<input
type="checkbox"
checked={config.noise_filter_enabled}
onChange={(e) => updateSetting('noise_filter_enabled', e.target.checked)}
className="rounded border-gray-300 text-indigo-600 focus:ring-indigo-500"
/>
<span className="text-sm font-medium text-gray-700">Noise Filter Enabled</span>
</label>
<p className="text-xs text-gray-500 mt-1">Requires restart to apply</p>
</div>
<div>
<label className="flex items-center space-x-2">
<input
type="checkbox"
checked={config.denoise_3d_enabled}
onChange={(e) => updateSetting('denoise_3d_enabled', e.target.checked)}
className="rounded border-gray-300 text-indigo-600 focus:ring-indigo-500"
/>
<span className="text-sm font-medium text-gray-700">3D Denoise Enabled</span>
</label>
<p className="text-xs text-gray-500 mt-1">Requires restart to apply</p>
</div>
</div>
</div>
{/* HDR Settings */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">HDR Settings</h4>
<div className="grid grid-cols-1 md:grid-cols-2 gap-4">
<div>
<label className="flex items-center space-x-2">
<input
type="checkbox"
checked={config.hdr_enabled}
onChange={(e) => updateSetting('hdr_enabled', e.target.checked)}
className="rounded border-gray-300 text-indigo-600 focus:ring-indigo-500"
/>
<span className="text-sm font-medium text-gray-700">HDR Enabled</span>
</label>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-2">
HDR Gain Mode: {config.hdr_gain_mode}
</label>
<input
type="range"
min="0"
max="3"
value={config.hdr_gain_mode}
onChange={(e) => updateSetting('hdr_gain_mode', parseInt(e.target.value))}
className="w-full"
disabled={!config.hdr_enabled}
/>
<div className="flex justify-between text-xs text-gray-500 mt-1">
<span>0</span>
<span>3</span>
</div>
</div>
</div>
</div>
{/* Video Recording Settings (Read-Only) */}
<div>
<h4 className="text-md font-medium text-gray-900 mb-4">Video Recording Settings</h4>
<div className="bg-gray-50 border border-gray-200 rounded-md p-4">
<div className="grid grid-cols-1 md:grid-cols-3 gap-4">
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Video Format
</label>
<div className="text-sm text-gray-900 font-medium">
{config.video_format?.toUpperCase() || 'MP4'}
</div>
<p className="text-xs text-gray-500">Current recording format</p>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Video Codec
</label>
<div className="text-sm text-gray-900 font-medium">
{config.video_codec?.toUpperCase() || 'MP4V'}
</div>
<p className="text-xs text-gray-500">Compression codec</p>
</div>
<div>
<label className="block text-sm font-medium text-gray-700 mb-1">
Video Quality
</label>
<div className="text-sm text-gray-900 font-medium">
{config.video_quality || 95}%
</div>
<p className="text-xs text-gray-500">Recording quality</p>
</div>
</div>
<div className="mt-4 p-3 bg-blue-50 border border-blue-200 rounded-md">
<div className="flex">
<div className="flex-shrink-0">
<svg className="h-5 w-5 text-blue-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zm-7-4a1 1 0 11-2 0 1 1 0 012 0zM9 9a1 1 0 000 2v3a1 1 0 001 1h1a1 1 0 100-2v-3a1 1 0 00-1-1H9z" clipRule="evenodd" />
</svg>
</div>
<div className="ml-3">
<h3 className="text-sm font-medium text-blue-800">Video Format Information</h3>
<div className="mt-2 text-sm text-blue-700">
<p>Video recording settings are configured in the system configuration file and require a service restart to modify.</p>
<p className="mt-1"><strong>Current benefits:</strong> MP4 format provides ~40% smaller file sizes and better web compatibility than AVI.</p>
</div>
</div>
</div>
</div>
</div>
</div>
{/* Information */}
<div className="bg-blue-50 border border-blue-200 rounded-md p-4">
<div className="flex">
<div className="flex-shrink-0">
<svg className="h-5 w-5 text-blue-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zm-7-4a1 1 0 11-2 0 1 1 0 012 0zM9 9a1 1 0 000 2v3a1 1 0 001 1h1a1 1 0 100-2v-3a1 1 0 00-1-1H9z" clipRule="evenodd" />
</svg>
</div>
<div className="ml-3">
<h3 className="text-sm font-medium text-blue-800">Configuration Notes</h3>
<div className="mt-2 text-sm text-blue-700">
<ul className="list-disc list-inside space-y-1">
<li><strong>Real-time settings:</strong> Exposure, gain, image quality, white balance - apply immediately</li>
<li><strong>System settings:</strong> Video format, noise reduction, auto-recording - configured in system files</li>
<li><strong>Performance:</strong> HDR mode may impact frame rate when enabled</li>
<li><strong>White balance:</strong> RGB gains only effective when auto white balance is disabled</li>
</ul>
</div>
</div>
</div>
</div>
</div>
)}
</div>
{/* Footer */}
{config && !loading && (
<div className="px-6 py-4 border-t border-gray-200 bg-gray-50">
<div className="flex items-center justify-between">
<div className="flex items-center space-x-2">
{hasChanges && (
<span className="text-sm text-orange-600 font-medium">
You have unsaved changes
</span>
)}
</div>
<div className="flex items-center space-x-3">
{hasChanges && (
<button
onClick={resetChanges}
className="px-4 py-2 text-sm font-medium text-gray-700 bg-white border border-gray-300 rounded-md hover:bg-gray-50"
>
Reset
</button>
)}
<button
onClick={saveConfig}
disabled={!hasChanges || saving}
className="px-4 py-2 text-sm font-medium text-white bg-indigo-600 border border-transparent rounded-md hover:bg-indigo-700 disabled:opacity-50 disabled:cursor-not-allowed"
>
{saving ? 'Saving...' : 'Save Changes'}
</button>
<button
onClick={onClose}
className="px-4 py-2 text-sm font-medium text-gray-700 bg-white border border-gray-300 rounded-md hover:bg-gray-50"
>
Close
</button>
</div>
</div>
</div>
)}
</div>
</div>
)
}

View File

@@ -0,0 +1,180 @@
import React, { useState, useEffect, useRef } from 'react'
import { visionApi } from '../services/api'
interface CameraPreviewModalProps {
cameraName: string
isOpen: boolean
onClose: () => void
onError?: (error: string) => void
}
export const CameraPreviewModal: React.FC<CameraPreviewModalProps> = ({
cameraName,
isOpen,
onClose,
onError,
}) => {
const [loading, setLoading] = useState(false)
const [streaming, setStreaming] = useState(false)
const [error, setError] = useState<string | null>(null)
const imgRef = useRef<HTMLImageElement>(null)
const streamUrlRef = useRef<string | null>(null)
useEffect(() => {
if (isOpen && cameraName) {
startStreaming()
}
return () => {
if (streaming) {
stopStreaming()
}
}
}, [isOpen, cameraName])
const startStreaming = async () => {
try {
setLoading(true)
setError(null)
const result = await visionApi.startStream(cameraName)
if (result.success) {
setStreaming(true)
const streamUrl = visionApi.getStreamUrl(cameraName)
streamUrlRef.current = streamUrl
if (imgRef.current) {
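// Cache-busting timestamp so the browser requests a fresh stream instead of reusing a cached response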
imgRef.current.src = `${streamUrl}?t=${Date.now()}`
}
} else {
throw new Error(result.message)
}
} catch (err) {
const errorMessage = err instanceof Error ? err.message : 'Failed to start stream'
setError(errorMessage)
onError?.(errorMessage)
} finally {
setLoading(false)
}
}
const stopStreaming = async () => {
try {
if (streaming) {
await visionApi.stopStream(cameraName)
setStreaming(false)
streamUrlRef.current = null
if (imgRef.current) {
imgRef.current.src = ''
}
}
} catch (err) {
console.error('Error stopping stream:', err)
}
}
const handleClose = () => {
stopStreaming()
onClose()
}
if (!isOpen) return null
return (
<div className="fixed inset-0 z-[999999] flex items-center justify-center overflow-y-auto">
<div
className="fixed inset-0 h-full w-full bg-gray-900/60 backdrop-blur-sm"
onClick={handleClose}
/>
<div className="relative w-11/12 max-w-5xl rounded-xl bg-white shadow-2xl dark:bg-gray-800 p-6" onClick={(e) => e.stopPropagation()}>
{/* Close Button */}
<button
onClick={handleClose}
className="absolute right-4 top-4 z-10 flex h-10 w-10 items-center justify-center rounded-lg bg-white dark:bg-gray-800 text-gray-400 border border-gray-300 dark:border-gray-600 transition-colors hover:bg-gray-100 hover:text-gray-700 dark:hover:bg-gray-700 dark:hover:text-white"
>
<svg width="20" height="20" viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2">
<path d="M18 6L6 18M6 6l12 12" />
</svg>
</button>
<div className="mt-2">
{/* Header */}
<div className="mb-4">
<h3 className="text-xl font-semibold text-gray-900 dark:text-white">
Camera Preview: {cameraName}
</h3>
</div>
{/* Content */}
<div className="mb-4">
{loading && (
<div className="flex items-center justify-center h-96 bg-gray-100 dark:bg-gray-900 rounded-lg">
<div className="text-center">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-indigo-600 mx-auto" />
<p className="mt-4 text-gray-600 dark:text-gray-400">Starting camera stream...</p>
</div>
</div>
)}
{error && (
<div className="bg-red-50 dark:bg-red-900/20 border border-red-200 dark:border-red-800 rounded-lg p-4">
<div className="flex">
<div className="flex-shrink-0">
<svg className="h-5 w-5 text-red-400" viewBox="0 0 20 20" fill="currentColor">
<path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clipRule="evenodd" />
</svg>
</div>
<div className="ml-3 flex-1">
<h3 className="text-sm font-medium text-red-800 dark:text-red-200">Stream Error</h3>
<div className="mt-2 text-sm text-red-700 dark:text-red-300">
<p>{error}</p>
</div>
<div className="mt-4">
<button
onClick={startStreaming}
className="bg-red-600 text-white px-4 py-2 rounded-md hover:bg-red-700 focus:outline-none focus:ring-2 focus:ring-red-500 focus:ring-offset-2 transition-colors"
>
Retry
</button>
</div>
</div>
</div>
</div>
)}
{streaming && !loading && !error && (
<div className="bg-black rounded-lg overflow-hidden">
<img
ref={imgRef}
alt={`Live stream from ${cameraName}`}
className="w-full h-auto max-h-[70vh] object-contain"
onError={() => setError('Failed to load camera stream')}
/>
</div>
)}
</div>
{/* Footer */}
<div className="flex items-center justify-between pt-4 border-t border-gray-200 dark:border-gray-700">
<div className="flex items-center space-x-2">
{streaming && (
<div className="flex items-center text-green-600 dark:text-green-400">
<div className="w-2 h-2 bg-green-500 rounded-full mr-2 animate-pulse" />
<span className="text-sm font-medium">Live Stream Active</span>
</div>
)}
</div>
<button
onClick={handleClose}
className="px-4 py-2 text-sm font-medium text-gray-700 dark:text-gray-300 bg-gray-100 dark:bg-gray-700 border border-gray-300 dark:border-gray-600 rounded-md hover:bg-gray-200 dark:hover:bg-gray-600 focus:outline-none focus:ring-2 focus:ring-gray-500 focus:ring-offset-2 transition-colors"
>
Close
</button>
</div>
</div>
</div>
</div>
)
}

View File

@@ -0,0 +1,124 @@
import { useEffect, useRef, useState, useCallback } from 'react'
// WebSocket message types from the API
export interface WebSocketEvent {
type: 'event'
event_type: string
source: string
data: any
timestamp: string
}
type EventHandler = (event: WebSocketEvent) => void
export function useWebSocket(url: string, options?: { enabled?: boolean }) {
const [isConnected, setIsConnected] = useState(false)
const [reconnectAttempts, setReconnectAttempts] = useState(0)
const wsRef = useRef<WebSocket | null>(null)
const reconnectTimeoutRef = useRef<NodeJS.Timeout | null>(null)
const handlersRef = useRef<Map<string, Set<EventHandler>>>(new Map())
const enabled = options?.enabled !== false
const connect = useCallback(() => {
if (!enabled || wsRef.current?.readyState === WebSocket.OPEN) {
return
}
try {
const ws = new WebSocket(url)
wsRef.current = ws
ws.onopen = () => {
setIsConnected(true)
setReconnectAttempts(0)
}
ws.onmessage = (event) => {
try {
const message: WebSocketEvent = JSON.parse(event.data)
// Call all handlers for this event type
const handlers = handlersRef.current.get(message.event_type)
if (handlers) {
handlers.forEach(handler => handler(message))
}
// Also call handlers for 'all' type
const allHandlers = handlersRef.current.get('all')
if (allHandlers) {
allHandlers.forEach(handler => handler(message))
}
} catch (err) {
console.error('Failed to parse WebSocket message:', err)
}
}
ws.onerror = (error) => {
console.error('WebSocket error:', error)
}
ws.onclose = () => {
setIsConnected(false)
// Reconnect with exponential backoff (max 10 attempts)
if (enabled && reconnectAttempts < 10) {
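// Delay doubles each attempt: 1s, 2s, 4s, ... capped at 30s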
const delay = Math.min(1000 * Math.pow(2, reconnectAttempts), 30000)
reconnectTimeoutRef.current = setTimeout(() => {
setReconnectAttempts(prev => prev + 1)
connect()
}, delay)
}
}
} catch (err) {
console.error('Failed to create WebSocket connection:', err)
setIsConnected(false)
}
}, [url, enabled, reconnectAttempts])
useEffect(() => {
if (enabled) {
connect()
}
return () => {
if (reconnectTimeoutRef.current) {
clearTimeout(reconnectTimeoutRef.current)
}
if (wsRef.current) {
wsRef.current.close()
wsRef.current = null
}
setIsConnected(false)
}
}, [connect, enabled])
const subscribe = useCallback((eventType: string | 'all', handler: EventHandler) => {
if (!handlersRef.current.has(eventType)) {
handlersRef.current.set(eventType, new Set())
}
handlersRef.current.get(eventType)!.add(handler)
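// Return an unsubscribe function so callers can clean up in a useEffect teardown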
return () => {
const handlers = handlersRef.current.get(eventType)
if (handlers) {
handlers.delete(handler)
}
}
}, [])
const send = useCallback((message: any) => {
if (wsRef.current?.readyState === WebSocket.OPEN) {
wsRef.current.send(JSON.stringify(message))
} else {
console.warn('WebSocket is not connected')
}
}, [])
return {
isConnected,
subscribe,
send,
reconnect: connect,
}
}

View File

@@ -0,0 +1,11 @@
@import "tailwindcss";
body {
margin: 0;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen',
'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue',
sans-serif;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale;
}

View File

@@ -0,0 +1,11 @@
import React from 'react'
import ReactDOM from 'react-dom/client'
import App from './App'
import './index.css'
ReactDOM.createRoot(document.getElementById('root')!).render(
<React.StrictMode>
<App />
</React.StrictMode>
)

View File

@@ -0,0 +1,320 @@
// Vision System API Client for vision-system-remote
// eslint-disable-next-line @typescript-eslint/ban-ts-comment
// @ts-ignore - Vite provides import.meta.env
const VISION_API_BASE_URL = (import.meta.env?.VITE_VISION_API_URL as string | undefined) || 'http://exp-dash:8000'
// Types (simplified - we'll use the same types from the original)
export interface SystemStatus {
system_started: boolean
mqtt_connected: boolean
last_mqtt_message: string
machines: Record<string, MachineStatus>
cameras: Record<string, CameraStatus>
active_recordings: number
total_recordings: number
uptime_seconds: number
}
export interface MachineStatus {
name: string
state: string
last_updated: string
last_message?: string
mqtt_topic?: string
}
export interface CameraStatus {
name?: string
status: string
is_recording: boolean
last_checked: string
last_error?: string | null
device_info?: {
friendly_name?: string
serial_number?: string
port_type?: string
model?: string
firmware_version?: string
}
current_recording_file?: string | null
recording_start_time?: string | null
last_frame_time?: string
frame_rate?: number
auto_recording_enabled: boolean
auto_recording_active: boolean
auto_recording_failure_count: number
auto_recording_last_attempt?: string
auto_recording_last_error?: string
}
export interface RecordingInfo {
camera_name: string
filename: string
start_time: string
state: string
end_time?: string
file_size_bytes?: number
frame_count?: number
duration_seconds?: number
error_message?: string | null
}
export interface StorageStats {
base_path: string
total_files: number
total_size_bytes: number
cameras: Record<string, {
file_count: number
total_size_bytes: number
}>
disk_usage: {
total: number
used: number
free: number
}
}
export interface MqttStatus {
connected: boolean
broker_host: string
broker_port: number
subscribed_topics: string[]
last_message_time: string
message_count: number
error_count: number
uptime_seconds: number
}
export interface StartRecordingResponse {
success: boolean
message: string
filename: string
}
export interface StopRecordingResponse {
success: boolean
message: string
}
export interface StreamStopResponse {
success: boolean
message: string
}
export interface CameraConfig {
name: string
machine_topic: string
storage_path: string
enabled: boolean
auto_start_recording_enabled: boolean
auto_recording_max_retries: number
auto_recording_retry_delay_seconds: number
exposure_ms: number
gain: number
target_fps: number
video_format: string
video_codec: string
video_quality: number
sharpness: number
contrast: number
saturation: number
gamma: number
noise_filter_enabled: boolean
denoise_3d_enabled: boolean
auto_white_balance: boolean
color_temperature_preset: number
wb_red_gain: number
wb_green_gain: number
wb_blue_gain: number
anti_flicker_enabled: boolean
light_frequency: number
bit_depth: number
hdr_enabled: boolean
hdr_gain_mode: number
}
export interface CameraConfigUpdate {
exposure_ms?: number
gain?: number
target_fps?: number
sharpness?: number
contrast?: number
saturation?: number
gamma?: number
noise_filter_enabled?: boolean
denoise_3d_enabled?: boolean
auto_white_balance?: boolean
color_temperature_preset?: number
wb_red_gain?: number
wb_green_gain?: number
wb_blue_gain?: number
anti_flicker_enabled?: boolean
light_frequency?: number
hdr_enabled?: boolean
hdr_gain_mode?: number
auto_start_recording_enabled?: boolean
auto_recording_max_retries?: number
auto_recording_retry_delay_seconds?: number
}
export interface CameraConfigUpdateResponse {
success: boolean
message: string
updated_settings: string[]
}
export interface CameraRecoveryResponse {
success: boolean
message: string
camera_name: string
operation: string
}
export interface StreamStartResponse {
success: boolean
message: string
}
class VisionApiClient {
private baseUrl: string
constructor(baseUrl: string = VISION_API_BASE_URL) {
this.baseUrl = baseUrl
}
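// Thin fetch wrapper: prefixes the base URL, sets JSON headers, and throws on non-2xx responses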
private async request<T>(endpoint: string, options: RequestInit = {}): Promise<T> {
const url = `${this.baseUrl}${endpoint}`
const response = await fetch(url, {
headers: {
'Content-Type': 'application/json',
...options.headers,
},
...options,
})
if (!response.ok) {
const errorText = await response.text()
throw new Error(`API request failed: ${response.status} ${response.statusText} - ${errorText}`)
}
return response.json()
}
async getSystemStatus(): Promise<SystemStatus> {
return this.request('/system/status')
}
async getCameras(): Promise<Record<string, CameraStatus>> {
return this.request('/cameras')
}
async getRecordings(): Promise<Record<string, RecordingInfo>> {
return this.request('/recordings')
}
async getStorageStats(): Promise<StorageStats> {
return this.request('/storage/stats')
}
async getMqttStatus(): Promise<MqttStatus> {
return this.request('/mqtt/status')
}
async startRecording(cameraName: string, filename?: string): Promise<StartRecordingResponse> {
return this.request(`/cameras/${cameraName}/start-recording`, {
method: 'POST',
body: JSON.stringify({ filename }),
})
}
async stopRecording(cameraName: string): Promise<StopRecordingResponse> {
return this.request(`/cameras/${cameraName}/stop-recording`, {
method: 'POST',
})
}
async stopStream(cameraName: string): Promise<StreamStopResponse> {
return this.request(`/cameras/${cameraName}/stop-stream`, {
method: 'POST',
})
}
getStreamUrl(cameraName: string): string {
return `${this.baseUrl}/cameras/${cameraName}/stream`
}
async startStream(cameraName: string): Promise<StreamStartResponse> {
return this.request(`/cameras/${cameraName}/start-stream`, {
method: 'POST',
})
}
async getCameraConfig(cameraName: string): Promise<CameraConfig> {
return this.request(`/cameras/${cameraName}/config`)
}
async updateCameraConfig(cameraName: string, config: CameraConfigUpdate): Promise<CameraConfigUpdateResponse> {
return this.request(`/cameras/${cameraName}/config`, {
method: 'PUT',
body: JSON.stringify(config),
})
}
async applyCameraConfig(cameraName: string): Promise<{ success: boolean; message: string }> {
return this.request(`/cameras/${cameraName}/apply-config`, {
method: 'POST',
})
}
async reinitializeCamera(cameraName: string): Promise<CameraRecoveryResponse> {
return this.request(`/cameras/${cameraName}/reinitialize`, {
method: 'POST',
})
}
async fullResetCamera(cameraName: string): Promise<CameraRecoveryResponse> {
return this.request(`/cameras/${cameraName}/full-reset`, {
method: 'POST',
})
}
}
export const visionApi = new VisionApiClient()
// Utility functions
export const formatBytes = (bytes: number): string => {
if (bytes === 0) return '0 Bytes'
const k = 1024
const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB']
const i = Math.floor(Math.log(bytes) / Math.log(k))
return parseFloat((bytes / Math.pow(k, i)).toFixed(2)) + ' ' + sizes[i]
}
export const formatDuration = (seconds: number): string => {
const hours = Math.floor(seconds / 3600)
const minutes = Math.floor((seconds % 3600) / 60)
const secs = Math.floor(seconds % 60)
if (hours > 0) {
return `${hours}h ${minutes}m ${secs}s`
} else if (minutes > 0) {
return `${minutes}m ${secs}s`
} else {
return `${secs}s`
}
}
export const formatUptime = (seconds: number): string => {
const days = Math.floor(seconds / 86400)
const hours = Math.floor((seconds % 86400) / 3600)
const minutes = Math.floor((seconds % 3600) / 60)
if (days > 0) {
return `${days}d ${hours}h ${minutes}m`
} else if (hours > 0) {
return `${hours}h ${minutes}m`
} else {
return `${minutes}m`
}
}
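// Example usage (illustrative only):
//   const status = await visionApi.getSystemStatus()
//   console.log(`MQTT connected: ${status.mqtt_connected}, uptime: ${formatUptime(status.uptime_seconds)}`)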

vision-system-remote/src/vite-env.d.ts (vendored, new file)
View File

@@ -0,0 +1,12 @@
/// <reference types="vite/client" />
interface ImportMetaEnv {
readonly VITE_VISION_API_URL?: string
readonly VITE_WS_URL?: string
readonly VITE_MEDIA_API_URL?: string
}
interface ImportMeta {
readonly env: ImportMetaEnv
}

View File

@@ -0,0 +1,30 @@
import React from 'react'
interface CameraCountWidgetProps {
cameraCount: number
machineCount: number
}
export const CameraCountWidget: React.FC<CameraCountWidgetProps> = ({
cameraCount,
machineCount,
}) => {
return (
<div className="bg-white overflow-hidden shadow rounded-lg">
<div className="p-5">
<div className="flex items-center">
<div className="flex-shrink-0">
<div className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-purple-100 text-purple-800">
{cameraCount} Cameras
</div>
</div>
</div>
<div className="mt-4">
<div className="text-2xl font-semibold text-gray-900">Devices</div>
<div className="mt-1 text-sm text-gray-500">{machineCount} Machines</div>
</div>
</div>
</div>
)
}

View File

@@ -0,0 +1,25 @@
import React from 'react'
import { StatusWidget } from './StatusWidget'
import type { SystemStatus } from '../services/api'
interface MqttStatusWidgetProps {
systemStatus: SystemStatus | null
}
export const MqttStatusWidget: React.FC<MqttStatusWidgetProps> = ({ systemStatus }) => {
const isConnected = systemStatus?.mqtt_connected ?? false
const lastMessage = systemStatus?.last_mqtt_message
return (
<StatusWidget
title="MQTT Status"
status={isConnected}
statusText={isConnected ? 'Connected' : 'Disconnected'}
subtitle={lastMessage ? `Last: ${new Date(lastMessage).toLocaleTimeString()}` : 'No messages'}
icon={
<div className={`w-3 h-3 rounded-full ${isConnected ? 'bg-green-500 animate-pulse' : 'bg-red-500'}`} />
}
/>
)
}

View File

@@ -0,0 +1,27 @@
import React from 'react'
interface RecordingsCountWidgetProps {
active: number
total: number
}
export const RecordingsCountWidget: React.FC<RecordingsCountWidgetProps> = ({ active, total }) => {
return (
<div className="bg-white overflow-hidden shadow rounded-lg">
<div className="p-5">
<div className="flex items-center">
<div className="flex-shrink-0">
<div className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-blue-100 text-blue-800">
{active} Active
</div>
</div>
</div>
<div className="mt-4">
<div className="text-2xl font-semibold text-gray-900">Recordings</div>
<div className="mt-1 text-sm text-gray-500">Total: {total}</div>
</div>
</div>
</div>
)
}

View File

@@ -0,0 +1,47 @@
import React from 'react'
interface StatusWidgetProps {
title: string
status: boolean
statusText?: string
subtitle?: string
icon?: React.ReactNode
className?: string
}
export const StatusWidget: React.FC<StatusWidgetProps> = ({
title,
status,
statusText,
subtitle,
icon,
className = '',
}) => {
return (
<div className={`bg-white overflow-hidden shadow rounded-lg ${className}`}>
<div className="p-5">
<div className="flex items-center justify-between">
<div className="flex items-center space-x-3">
{icon && <div className="flex-shrink-0">{icon}</div>}
<div className="flex-shrink-0">
<div
className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${
status ? 'bg-green-100 text-green-800' : 'bg-red-100 text-red-800'
}`}
>
{statusText || (status ? 'Online' : 'Offline')}
</div>
</div>
</div>
</div>
<div className="mt-4">
<div className="text-2xl font-semibold text-gray-900">{title}</div>
{subtitle && (
<div className="mt-1 text-sm text-gray-500">{subtitle}</div>
)}
</div>
</div>
</div>
)
}

View File

@@ -0,0 +1,26 @@
import React from 'react'
import { StatusWidget } from './StatusWidget'
import { formatUptime } from '../services/api'
import type { SystemStatus } from '../services/api'
interface SystemHealthWidgetProps {
systemStatus: SystemStatus | null
}
export const SystemHealthWidget: React.FC<SystemHealthWidgetProps> = ({ systemStatus }) => {
const isOnline = systemStatus?.system_started ?? false
const uptime = systemStatus?.uptime_seconds ?? 0
return (
<StatusWidget
title="System Status"
status={isOnline}
statusText={isOnline ? 'Online' : 'Offline'}
subtitle={uptime > 0 ? `Uptime: ${formatUptime(uptime)}` : undefined}
icon={
<div className={`w-3 h-3 rounded-full ${isOnline ? 'bg-green-500' : 'bg-red-500'}`} />
}
/>
)
}

View File

@@ -0,0 +1,26 @@
{
"compilerOptions": {
"target": "ES2020",
"useDefineForClassFields": true,
"lib": ["ES2020", "DOM", "DOM.Iterable"],
"module": "ESNext",
"skipLibCheck": true,
/* Bundler mode */
"moduleResolution": "bundler",
"allowImportingTsExtensions": true,
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
"jsx": "react-jsx",
/* Linting */
"strict": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"noFallthroughCasesInSwitch": true
},
"include": ["src", "src/vite-env.d.ts"],
"references": [{ "path": "./tsconfig.node.json" }]
}

View File

@@ -0,0 +1,11 @@
{
"compilerOptions": {
"composite": true,
"skipLibCheck": true,
"module": "ESNext",
"moduleResolution": "bundler",
"allowSyntheticDefaultImports": true
},
"include": ["vite.config.ts"]
}

View File

@@ -0,0 +1,32 @@
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'
import federation from '@originjs/vite-plugin-federation'
import tailwindcss from '@tailwindcss/vite'
export default defineConfig({
plugins: [
react(),
tailwindcss(),
federation({
name: 'visionSystemRemote',
filename: 'remoteEntry.js',
exposes: {
'./App': './src/App.tsx',
},
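// Share single React/ReactDOM instances with the host so only one copy of React runs at runtime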
shared: {
react: { singleton: true, eager: true },
'react-dom': { singleton: true, eager: true },
},
}),
],
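// Dev server is exposed on all interfaces so the host dashboard (exp-dash) can reach it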
server: {
port: 3002,
host: '0.0.0.0',
allowedHosts: ['exp-dash', 'localhost'],
cors: true
},
build: {
target: 'esnext',
},
})