Refactor video streaming feature and update dependencies

- Replaced npm ci with npm install in docker-compose for better package management.
- Introduced remote component loading for the VideoStreamingPage with error handling.
- Updated the title in index.html to "Experiments Dashboard" for clarity.
- Added new video remote service configuration in docker-compose for improved integration.
- Removed deprecated files and components related to the video streaming feature to streamline the codebase.
- Updated package.json and package-lock.json to include @originjs/vite-plugin-federation for module federation support.
This commit is contained in:
salirezav
2025-10-30 15:36:19 -04:00
parent 9f669e7dff
commit 0b724fe59b
102 changed files with 4656 additions and 13376 deletions

View File

@@ -54,7 +54,7 @@ services:
- TZ=America/New_York - TZ=America/New_York
command: > command: >
sh -lc " sh -lc "
npm ci; npm install;
npm run dev -- --host 0.0.0.0 --port 8080 npm run dev -- --host 0.0.0.0 --port 8080
" "
# Ensure the web container can resolve host.docker.internal on Linux # Ensure the web container can resolve host.docker.internal on Linux
@@ -62,3 +62,21 @@ services:
- "host.docker.internal:host-gateway" - "host.docker.internal:host-gateway"
ports: ports:
- "8080:8080" - "8080:8080"
video-remote:
image: node:20-alpine
working_dir: /app
environment:
- TZ=America/New_York
volumes:
- ./video-remote:/app
command: >
sh -lc "
npm install;
npm run build;
npx http-server dist -p 3001 --cors -c-1
"
extra_hosts:
- "host.docker.internal:host-gateway"
ports:
- "3001:3001"

View File

@@ -1,415 +0,0 @@
# 🤖 AI Agent Video Integration Guide
This guide provides comprehensive step-by-step instructions for AI agents and external systems to successfully integrate with the USDA Vision Camera System's video streaming functionality.
## 🎯 Overview
The USDA Vision Camera System provides a complete video streaming API that allows AI agents to:
- Browse and select videos from multiple cameras
- Stream videos with seeking capabilities
- Generate thumbnails for preview
- Access video metadata and technical information
## 🔗 API Base Configuration
### Connection Details
```bash
# Default API Base URL
API_BASE_URL="http://localhost:8000"
# For remote access, replace with actual server IP/hostname
API_BASE_URL="http://192.168.1.100:8000"
```
### Authentication
**⚠️ IMPORTANT: No authentication is currently required.**
- All endpoints are publicly accessible
- No API keys or tokens needed
- CORS is enabled for web browser integration
## 📋 Step-by-Step Integration Workflow
### Step 1: Verify System Connectivity
```bash
# Test basic connectivity
curl -f "${API_BASE_URL}/health" || echo "❌ System not accessible"
# Check system status
curl "${API_BASE_URL}/system/status"
```
**Expected Response:**
```json
{
"status": "healthy",
"timestamp": "2025-08-05T10:30:00Z"
}
```
### Step 2: List Available Videos
```bash
# Get all videos with metadata
curl "${API_BASE_URL}/videos/?include_metadata=true&limit=50"
# Filter by specific camera
curl "${API_BASE_URL}/videos/?camera_name=camera1&include_metadata=true"
# Filter by date range
curl "${API_BASE_URL}/videos/?start_date=2025-08-04T00:00:00&end_date=2025-08-05T23:59:59"
```
**Response Structure:**
```json
{
"videos": [
{
"file_id": "camera1_auto_blower_separator_20250804_143022.mp4",
"camera_name": "camera1",
"filename": "camera1_auto_blower_separator_20250804_143022.mp4",
"file_size_bytes": 31457280,
"format": "mp4",
"status": "completed",
"created_at": "2025-08-04T14:30:22",
"start_time": "2025-08-04T14:30:22",
"end_time": "2025-08-04T14:32:22",
"machine_trigger": "blower_separator",
"is_streamable": true,
"needs_conversion": false,
"metadata": {
"duration_seconds": 120.5,
"width": 1920,
"height": 1080,
"fps": 30.0,
"codec": "mp4v",
"bitrate": 5000000,
"aspect_ratio": 1.777
}
}
],
"total_count": 1
}
```
### Step 3: Select and Validate Video
```bash
# Get detailed video information
FILE_ID="camera1_auto_blower_separator_20250804_143022.mp4"
curl "${API_BASE_URL}/videos/${FILE_ID}"
# Validate video is playable
curl -X POST "${API_BASE_URL}/videos/${FILE_ID}/validate"
# Get streaming technical details
curl "${API_BASE_URL}/videos/${FILE_ID}/info"
```
### Step 4: Generate Video Thumbnail
```bash
# Generate thumbnail at 5 seconds, 320x240 resolution
curl "${API_BASE_URL}/videos/${FILE_ID}/thumbnail?timestamp=5.0&width=320&height=240" \
--output "thumbnail_${FILE_ID}.jpg"
# Generate multiple thumbnails for preview
for timestamp in 1 30 60 90; do
curl "${API_BASE_URL}/videos/${FILE_ID}/thumbnail?timestamp=${timestamp}&width=160&height=120" \
--output "preview_${timestamp}s.jpg"
done
```
### Step 5: Stream Video Content
```bash
# Stream entire video
curl "${API_BASE_URL}/videos/${FILE_ID}/stream" --output "video.mp4"
# Stream specific byte range (for seeking)
curl -H "Range: bytes=0-1048575" \
"${API_BASE_URL}/videos/${FILE_ID}/stream" \
--output "video_chunk.mp4"
# Test range request support
curl -I -H "Range: bytes=0-1023" \
"${API_BASE_URL}/videos/${FILE_ID}/stream"
```
## 🔧 Programming Language Examples
### Python Integration
```python
import requests
import json
from typing import List, Dict, Optional
class USDAVideoClient:
def __init__(self, base_url: str = "http://localhost:8000"):
self.base_url = base_url.rstrip('/')
self.session = requests.Session()
def list_videos(self, camera_name: Optional[str] = None,
include_metadata: bool = True, limit: int = 50) -> Dict:
"""List available videos with optional filtering."""
params = {
'include_metadata': include_metadata,
'limit': limit
}
if camera_name:
params['camera_name'] = camera_name
response = self.session.get(f"{self.base_url}/videos/", params=params)
response.raise_for_status()
return response.json()
def get_video_info(self, file_id: str) -> Dict:
"""Get detailed video information."""
response = self.session.get(f"{self.base_url}/videos/{file_id}")
response.raise_for_status()
return response.json()
def get_thumbnail(self, file_id: str, timestamp: float = 1.0,
width: int = 320, height: int = 240) -> bytes:
"""Generate and download video thumbnail."""
params = {
'timestamp': timestamp,
'width': width,
'height': height
}
response = self.session.get(
f"{self.base_url}/videos/{file_id}/thumbnail",
params=params
)
response.raise_for_status()
return response.content
def stream_video_range(self, file_id: str, start_byte: int,
end_byte: int) -> bytes:
"""Stream specific byte range of video."""
headers = {'Range': f'bytes={start_byte}-{end_byte}'}
response = self.session.get(
f"{self.base_url}/videos/{file_id}/stream",
headers=headers
)
response.raise_for_status()
return response.content
def validate_video(self, file_id: str) -> bool:
"""Validate that video is accessible and playable."""
response = self.session.post(f"{self.base_url}/videos/{file_id}/validate")
response.raise_for_status()
return response.json().get('is_valid', False)
# Usage example
client = USDAVideoClient("http://192.168.1.100:8000")
# List videos from camera1
videos = client.list_videos(camera_name="camera1")
print(f"Found {videos['total_count']} videos")
# Select first video
if videos['videos']:
video = videos['videos'][0]
file_id = video['file_id']
# Validate video
if client.validate_video(file_id):
print(f"✅ Video {file_id} is valid")
# Get thumbnail
thumbnail = client.get_thumbnail(file_id, timestamp=5.0)
with open(f"thumbnail_{file_id}.jpg", "wb") as f:
f.write(thumbnail)
# Stream first 1MB
chunk = client.stream_video_range(file_id, 0, 1048575)
print(f"Downloaded {len(chunk)} bytes")
```
### JavaScript/Node.js Integration
```javascript
class USDAVideoClient {
constructor(baseUrl = 'http://localhost:8000') {
this.baseUrl = baseUrl.replace(/\/$/, '');
}
async listVideos(options = {}) {
const params = new URLSearchParams({
include_metadata: options.includeMetadata ?? true,
limit: options.limit || 50
});
if (options.cameraName) {
params.append('camera_name', options.cameraName);
}
const response = await fetch(`${this.baseUrl}/videos/?${params}`);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return response.json();
}
async getVideoInfo(fileId) {
const response = await fetch(`${this.baseUrl}/videos/${fileId}`);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return response.json();
}
async getThumbnail(fileId, options = {}) {
const params = new URLSearchParams({
timestamp: options.timestamp ?? 1.0,
width: options.width || 320,
height: options.height || 240
});
const response = await fetch(
`${this.baseUrl}/videos/${fileId}/thumbnail?${params}`
);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
return response.blob();
}
async validateVideo(fileId) {
const response = await fetch(
`${this.baseUrl}/videos/${fileId}/validate`,
{ method: 'POST' }
);
if (!response.ok) throw new Error(`HTTP ${response.status}`);
const result = await response.json();
return result.is_valid;
}
getStreamUrl(fileId) {
return `${this.baseUrl}/videos/${fileId}/stream`;
}
}
// Usage example
const client = new USDAVideoClient('http://192.168.1.100:8000');
async function integrateWithVideos() {
try {
// List videos
const videos = await client.listVideos({ cameraName: 'camera1' });
console.log(`Found ${videos.total_count} videos`);
if (videos.videos.length > 0) {
const video = videos.videos[0];
const fileId = video.file_id;
// Validate video
const isValid = await client.validateVideo(fileId);
if (isValid) {
console.log(`✅ Video ${fileId} is valid`);
// Get thumbnail
const thumbnail = await client.getThumbnail(fileId, {
timestamp: 5.0,
width: 320,
height: 240
});
// Create video element for playback
const videoElement = document.createElement('video');
videoElement.controls = true;
videoElement.src = client.getStreamUrl(fileId);
document.body.appendChild(videoElement);
}
}
} catch (error) {
console.error('Integration error:', error);
}
}
```
## 🚨 Error Handling
### Common HTTP Status Codes
```bash
# Success responses
200 # OK - Request successful
206 # Partial Content - Range request successful
# Client error responses
400 # Bad Request - Invalid parameters
404 # Not Found - Video file doesn't exist
416 # Range Not Satisfiable - Invalid range request
# Server error responses
500 # Internal Server Error - Failed to process video
503 # Service Unavailable - Video module not available
```
### Error Response Format
```json
{
"detail": "Video camera1_recording_20250804_143022.avi not found"
}
```
### Robust Error Handling Example
```python
def safe_video_operation(client, file_id):
try:
# Validate video first
if not client.validate_video(file_id):
return {"error": "Video is not valid or accessible"}
# Get video info
video_info = client.get_video_info(file_id)
# Check if streamable
if not video_info.get('is_streamable', False):
return {"error": "Video is not streamable"}
return {"success": True, "video_info": video_info}
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
return {"error": "Video not found"}
elif e.response.status_code == 416:
return {"error": "Invalid range request"}
else:
return {"error": f"HTTP error: {e.response.status_code}"}
except requests.exceptions.ConnectionError:
return {"error": "Cannot connect to video server"}
except Exception as e:
return {"error": f"Unexpected error: {str(e)}"}
```
## ✅ Integration Checklist
### Pre-Integration
- [ ] Verify network connectivity to USDA Vision Camera System
- [ ] Test basic API endpoints (`/health`, `/system/status`)
- [ ] Understand video file naming conventions
- [ ] Plan error handling strategy
### Video Selection
- [ ] Implement video listing with appropriate filters
- [ ] Add video validation before processing
- [ ] Handle pagination for large video collections
- [ ] Implement caching for video metadata
### Video Playback
- [ ] Test video streaming with range requests
- [ ] Implement thumbnail generation for previews
- [ ] Add progress tracking for video playback
- [ ] Handle different video formats (MP4, AVI)
### Error Handling
- [ ] Handle network connectivity issues
- [ ] Manage video not found scenarios
- [ ] Deal with invalid range requests
- [ ] Implement retry logic for transient failures
### Performance
- [ ] Use range requests for efficient seeking
- [ ] Implement client-side caching where appropriate
- [ ] Monitor bandwidth usage for video streaming
- [ ] Consider thumbnail caching for better UX
## 🎯 Next Steps
1. **Test Integration**: Use the provided examples to test basic connectivity
2. **Implement Error Handling**: Add robust error handling for production use
3. **Optimize Performance**: Implement caching and efficient streaming
4. **Monitor Usage**: Track API usage and performance metrics
5. **Security Review**: Consider authentication if exposing externally
This guide provides everything needed for successful integration with the USDA Vision Camera System's video streaming functionality. The system is designed to be simple and reliable for AI agents and external systems to consume video content efficiently.

View File

@@ -1,207 +0,0 @@
# API Changes Summary: Camera Settings and Video Format Updates
## Overview
This document tracks major API changes including camera settings enhancements and the MP4 video format update.
## 🎥 Latest Update: MP4 Video Format (v2.1)
**Date**: August 2025
**Major Changes**:
- **Video Format**: Changed from AVI/XVID to MP4/MPEG-4 format
- **File Extensions**: New recordings use `.mp4` instead of `.avi`
- **File Size**: ~40% reduction in file sizes
- **Streaming**: Better web browser compatibility
**New Configuration Fields**:
```json
{
"video_format": "mp4", // File format: "mp4" or "avi"
"video_codec": "mp4v", // Video codec: "mp4v", "XVID", "MJPG"
"video_quality": 95 // Quality: 0-100 (higher = better)
}
```
**Frontend Impact**:
- ✅ Better streaming performance and browser support
- ✅ Smaller file sizes for faster transfers
- ✅ Universal HTML5 video player compatibility
- ✅ Backward compatible with existing AVI files
**Documentation**: See [MP4 Format Update Guide](MP4_FORMAT_UPDATE.md)
---
## Previous Changes: Camera Settings and Filename Handling
Enhanced the `POST /cameras/{camera_name}/start-recording` API endpoint to accept optional camera settings (shutter speed/exposure, gain, and fps) and ensure all filenames have datetime prefixes.
## Changes Made
### 1. API Models (`usda_vision_system/api/models.py`)
- **Enhanced `StartRecordingRequest`** to include optional parameters:
- `exposure_ms: Optional[float]` - Exposure time in milliseconds
- `gain: Optional[float]` - Camera gain value
- `fps: Optional[float]` - Target frames per second
### 2. Camera Recorder (`usda_vision_system/camera/recorder.py`)
- **Added `update_camera_settings()` method** to dynamically update camera settings:
- Updates exposure time using `mvsdk.CameraSetExposureTime()`
- Updates gain using `mvsdk.CameraSetAnalogGain()`
- Updates target FPS in camera configuration
- Logs all setting changes
- Returns boolean indicating success/failure
### 3. Camera Manager (`usda_vision_system/camera/manager.py`)
- **Enhanced `manual_start_recording()` method** to accept new parameters:
- Added optional `exposure_ms`, `gain`, and `fps` parameters
- Calls `update_camera_settings()` if any settings are provided
- **Automatic datetime prefix**: Always prepends timestamp to filename
- If custom filename provided: `{timestamp}_{custom_filename}`
- If no filename provided: `{camera_name}_manual_{timestamp}.avi`
### 4. API Server (`usda_vision_system/api/server.py`)
- **Updated start-recording endpoint** to:
- Pass new camera settings to camera manager
- Handle filename response with datetime prefix
- Maintain backward compatibility with existing requests
### 5. API Tests (`api-tests.http`)
- **Added comprehensive test examples**:
- Basic recording (existing functionality)
- Recording with camera settings
- Recording with settings only (no filename)
- Different parameter combinations
## Usage Examples
### Basic Recording (unchanged)
```http
POST http://localhost:8000/cameras/camera1/start-recording
Content-Type: application/json
{
"camera_name": "camera1",
"filename": "test.avi"
}
```
**Result**: File saved as `20241223_143022_test.avi`
### Recording with Camera Settings
```http
POST http://localhost:8000/cameras/camera1/start-recording
Content-Type: application/json
{
"camera_name": "camera1",
"filename": "high_quality.avi",
"exposure_ms": 2.0,
"gain": 4.0,
"fps": 5.0
}
```
**Result**:
- Camera settings updated before recording
- File saved as `20241223_143022_high_quality.avi`
### Maximum FPS Recording
```http
POST http://localhost:8000/cameras/camera1/start-recording
Content-Type: application/json
{
"camera_name": "camera1",
"filename": "max_speed.avi",
"exposure_ms": 0.1,
"gain": 1.0,
"fps": 0
}
```
**Result**:
- Camera captures at maximum possible speed (no delay between frames)
- Video file saved with 30 FPS metadata for proper playback
- Actual capture rate depends on camera hardware and exposure settings
### Settings Only (no filename)
```http
POST http://localhost:8000/cameras/camera1/start-recording
Content-Type: application/json
{
"camera_name": "camera1",
"exposure_ms": 1.5,
"gain": 3.0,
"fps": 7.0
}
```
**Result**:
- Camera settings updated
- File saved as `camera1_manual_20241223_143022.avi`
## Key Features
### 1. **Backward Compatibility**
- All existing API calls continue to work unchanged
- New parameters are optional
- Default behavior preserved when no settings provided
### 2. **Automatic Datetime Prefix**
- **ALL filenames now have datetime prefix** regardless of what's sent
- Format: `YYYYMMDD_HHMMSS_` (Atlanta timezone)
- Ensures unique filenames and chronological ordering
### 3. **Dynamic Camera Settings**
- Settings can be changed per recording without restarting system
- Based on proven implementation from `old tests/camera_video_recorder.py`
- Proper error handling and logging
### 4. **Maximum FPS Capture**
- **`fps: 0`** = Capture at maximum possible speed (no delay between frames)
- **`fps > 0`** = Capture at specified frame rate with controlled timing
- **`fps` omitted** = Uses camera config default (usually 3.0 fps)
- Video files saved with 30 FPS metadata when fps=0 for proper playback
### 5. **Parameter Validation**
- Uses Pydantic models for automatic validation
- Optional parameters with proper type checking
- Descriptive field documentation
## Testing
Run the test script to verify functionality:
```bash
# Start the system first
python main.py
# In another terminal, run tests
python test_api_changes.py
```
The test script verifies:
- Basic recording functionality
- Camera settings application
- Filename datetime prefix handling
- API response accuracy
## Implementation Notes
### Camera Settings Mapping
- **Exposure**: Converted from milliseconds to microseconds for SDK
- **Gain**: Converted to camera units (multiplied by 100)
- **FPS**: Stored in camera config, used by recording loop
### Error Handling
- Settings update failures are logged but don't prevent recording
- Invalid camera names return appropriate HTTP errors
- Camera initialization failures are handled gracefully
### Filename Generation
- Uses `format_filename_timestamp()` from timezone utilities
- Ensures Atlanta timezone consistency
- Handles both custom and auto-generated filenames
## Similar to Old Implementation
The camera settings functionality mirrors the proven approach in `old tests/camera_video_recorder.py`:
- Same parameter names and ranges
- Same SDK function calls
- Same conversion factors
- Proven to work with the camera hardware

View File

@@ -1,824 +0,0 @@
# 🚀 USDA Vision Camera System - Complete API Documentation
This document provides comprehensive documentation for all API endpoints in the USDA Vision Camera System, including recent enhancements and new features.
## 📋 Table of Contents
- [🔧 System Status & Health](#-system-status--health)
- [📷 Camera Management](#-camera-management)
- [🎥 Recording Control](#-recording-control)
- [🤖 Auto-Recording Management](#-auto-recording-management)
- [🎛️ Camera Configuration](#-camera-configuration)
- [📡 MQTT & Machine Status](#-mqtt--machine-status)
- [💾 Storage & File Management](#-storage--file-management)
- [🔄 Camera Recovery & Diagnostics](#-camera-recovery--diagnostics)
- [📺 Live Streaming](#-live-streaming)
- [🎬 Video Streaming & Playback](#-video-streaming--playback)
- [🌐 WebSocket Real-time Updates](#-websocket-real-time-updates)
## 🔧 System Status & Health
### Get System Status
```http
GET /system/status
```
**Response**: `SystemStatusResponse`
```json
{
"system_started": true,
"mqtt_connected": true,
"last_mqtt_message": "2024-01-15T10:30:00Z",
"machines": {
"vibratory_conveyor": {
"name": "vibratory_conveyor",
"state": "ON",
"last_updated": "2024-01-15T10:30:00Z"
}
},
"cameras": {
"camera1": {
"name": "camera1",
"status": "ACTIVE",
"is_recording": false,
"auto_recording_enabled": true
}
},
"active_recordings": 0,
"total_recordings": 15,
"uptime_seconds": 3600.5
}
```
### Health Check
```http
GET /health
```
**Response**: Simple health status
```json
{
"status": "healthy",
"timestamp": "2024-01-15T10:30:00Z"
}
```
## 📷 Camera Management
### Get All Cameras
```http
GET /cameras
```
**Response**: `Dict[str, CameraStatusResponse]`
### Get Specific Camera Status
```http
GET /cameras/{camera_name}/status
```
**Response**: `CameraStatusResponse`
```json
{
"name": "camera1",
"status": "ACTIVE",
"is_recording": false,
"last_checked": "2024-01-15T10:30:00Z",
"last_error": null,
"device_info": {
"model": "GigE Camera",
"serial": "12345"
},
"current_recording_file": null,
"recording_start_time": null,
"auto_recording_enabled": true,
"auto_recording_active": false,
"auto_recording_failure_count": 0,
"auto_recording_last_attempt": null,
"auto_recording_last_error": null
}
```
## 🎥 Recording Control
### Start Recording
```http
POST /cameras/{camera_name}/start-recording
Content-Type: application/json
{
"filename": "test_recording.avi",
"exposure_ms": 2.0,
"gain": 4.0,
"fps": 5.0
}
```
**Request Model**: `StartRecordingRequest`
- `filename` (optional): Custom filename (datetime prefix will be added automatically)
- `exposure_ms` (optional): Exposure time in milliseconds
- `gain` (optional): Camera gain value
- `fps` (optional): Target frames per second
**Response**: `StartRecordingResponse`
```json
{
"success": true,
"message": "Recording started for camera1",
"filename": "20240115_103000_test_recording.avi"
}
```
**Key Features**:
- **Automatic datetime prefix**: All filenames get `YYYYMMDD_HHMMSS_` prefix
- **Dynamic camera settings**: Adjust exposure, gain, and FPS per recording
- **Backward compatibility**: All existing API calls work unchanged
### Stop Recording
```http
POST /cameras/{camera_name}/stop-recording
```
**Response**: `StopRecordingResponse`
```json
{
"success": true,
"message": "Recording stopped for camera1",
"duration_seconds": 45.2
}
```
## 🤖 Auto-Recording Management
### Enable Auto-Recording for Camera
```http
POST /cameras/{camera_name}/auto-recording/enable
```
**Response**: `AutoRecordingConfigResponse`
```json
{
"success": true,
"message": "Auto-recording enabled for camera1",
"camera_name": "camera1",
"enabled": true
}
```
### Disable Auto-Recording for Camera
```http
POST /cameras/{camera_name}/auto-recording/disable
```
**Response**: `AutoRecordingConfigResponse`
### Get Auto-Recording Status
```http
GET /auto-recording/status
```
**Response**: `AutoRecordingStatusResponse`
```json
{
"running": true,
"auto_recording_enabled": true,
"retry_queue": {},
"enabled_cameras": ["camera1", "camera2"]
}
```
**Auto-Recording Features**:
- 🤖 **MQTT-triggered recording**: Automatically starts/stops based on machine state
- 🔄 **Retry logic**: Failed recordings are retried with configurable delays
- 📊 **Per-camera control**: Enable/disable auto-recording individually
- 📈 **Status tracking**: Monitor failure counts and last attempts
## 🎛️ Camera Configuration
### Get Camera Configuration
```http
GET /cameras/{camera_name}/config
```
**Response**: `CameraConfigResponse`
```json
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 0.3,
"gain": 4.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 0,
"wb_red_gain": 0.94,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.87,
"anti_flicker_enabled": false,
"light_frequency": 0,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 2
}
```
### Update Camera Configuration
```http
PUT /cameras/{camera_name}/config
Content-Type: application/json
{
"exposure_ms": 2.0,
"gain": 4.0,
"target_fps": 5.0,
"sharpness": 130
}
```
### Apply Configuration (Restart Required)
```http
POST /cameras/{camera_name}/apply-config
```
**Configuration Categories**:
- **Real-time**: `exposure_ms`, `gain`, `target_fps`, `sharpness`, `contrast`, etc.
- ⚠️ **Restart required**: `noise_filter_enabled`, `denoise_3d_enabled`, `bit_depth`, `video_format`, `video_codec`, `video_quality`
For detailed configuration options, see [Camera Configuration API Guide](api/CAMERA_CONFIG_API.md).
## 📡 MQTT & Machine Status
### Get All Machines
```http
GET /machines
```
**Response**: `Dict[str, MachineStatusResponse]`
### Get MQTT Status
```http
GET /mqtt/status
```
**Response**: `MQTTStatusResponse`
```json
{
"connected": true,
"broker_host": "192.168.1.110",
"broker_port": 1883,
"subscribed_topics": ["vibratory_conveyor", "blower_separator"],
"last_message_time": "2024-01-15T10:30:00Z",
"message_count": 1250,
"error_count": 2,
"uptime_seconds": 3600.5
}
```
### Get MQTT Events History
```http
GET /mqtt/events?limit=10
```
**Response**: `MQTTEventsHistoryResponse`
```json
{
"events": [
{
"machine_name": "vibratory_conveyor",
"topic": "vibratory_conveyor",
"payload": "ON",
"normalized_state": "ON",
"timestamp": "2024-01-15T10:30:00Z",
"message_number": 1250
}
],
"total_events": 1250,
"last_updated": "2024-01-15T10:30:00Z"
}
```
## 💾 Storage & File Management
### Get Storage Statistics
```http
GET /storage/stats
```
**Response**: `StorageStatsResponse`
```json
{
"base_path": "/storage",
"total_files": 150,
"total_size_bytes": 5368709120,
"cameras": {
"camera1": {
"file_count": 75,
"total_size_bytes": 2684354560
},
"camera2": {
"file_count": 75,
"total_size_bytes": 2684354560
}
},
"disk_usage": {
"total_bytes": 107374182400,
"used_bytes": 53687091200,
"free_bytes": 53687091200,
"usage_percent": 50.0
}
}
```
### Get File List
```http
POST /storage/files
Content-Type: application/json
{
"camera_name": "camera1",
"start_date": "2024-01-15",
"end_date": "2024-01-16",
"limit": 50
}
```
**Response**: `FileListResponse`
```json
{
"files": [
{
"filename": "20240115_103000_test_recording.avi",
"camera_name": "camera1",
"size_bytes": 52428800,
"created_time": "2024-01-15T10:30:00Z",
"duration_seconds": 30.5
}
],
"total_count": 1
}
```
### Cleanup Old Files
```http
POST /storage/cleanup
Content-Type: application/json
{
"max_age_days": 30
}
```
**Response**: `CleanupResponse`
```json
{
"files_removed": 25,
"bytes_freed": 1073741824,
"errors": []
}
```
## 🔄 Camera Recovery & Diagnostics
### Test Camera Connection
```http
POST /cameras/{camera_name}/test-connection
```
**Response**: `CameraTestResponse`
### Reconnect Camera
```http
POST /cameras/{camera_name}/reconnect
```
**Response**: `CameraRecoveryResponse`
### Restart Camera Grab Process
```http
POST /cameras/{camera_name}/restart-grab
```
**Response**: `CameraRecoveryResponse`
### Reset Camera Timestamp
```http
POST /cameras/{camera_name}/reset-timestamp
```
**Response**: `CameraRecoveryResponse`
### Full Camera Reset
```http
POST /cameras/{camera_name}/full-reset
```
**Response**: `CameraRecoveryResponse`
### Reinitialize Camera
```http
POST /cameras/{camera_name}/reinitialize
```
**Response**: `CameraRecoveryResponse`
**Recovery Response Example**:
```json
{
"success": true,
"message": "Camera camera1 reconnected successfully",
"camera_name": "camera1",
"operation": "reconnect",
"timestamp": "2024-01-15T10:30:00Z"
}
```
## 📺 Live Streaming
### Get Live MJPEG Stream
```http
GET /cameras/{camera_name}/stream
```
**Response**: MJPEG video stream (multipart/x-mixed-replace)
### Start Camera Stream
```http
POST /cameras/{camera_name}/start-stream
```
### Stop Camera Stream
```http
POST /cameras/{camera_name}/stop-stream
```
**Streaming Features**:
- 📺 **MJPEG format**: Compatible with web browsers and React apps
- 🔄 **Concurrent operation**: Stream while recording simultaneously
- **Low latency**: Real-time preview for monitoring
For detailed streaming integration, see [Streaming Guide](guides/STREAMING_GUIDE.md).
## 🎬 Video Streaming & Playback
The system includes a comprehensive video streaming module that provides YouTube-like video playback capabilities with HTTP range request support, thumbnail generation, and intelligent caching.
### List Videos
```http
GET /videos/
```
**Query Parameters:**
- `camera_name` (optional): Filter by camera name
- `start_date` (optional): Filter videos created after this date (ISO format)
- `end_date` (optional): Filter videos created before this date (ISO format)
- `limit` (optional): Maximum number of results (default: 50, max: 1000)
- `include_metadata` (optional): Include video metadata (default: false)
**Response**: `VideoListResponse`
```json
{
"videos": [
{
"file_id": "camera1_auto_blower_separator_20250804_143022.mp4",
"camera_name": "camera1",
"filename": "camera1_auto_blower_separator_20250804_143022.mp4",
"file_size_bytes": 31457280,
"format": "mp4",
"status": "completed",
"created_at": "2025-08-04T14:30:22",
"start_time": "2025-08-04T14:30:22",
"end_time": "2025-08-04T14:32:22",
"machine_trigger": "blower_separator",
"is_streamable": true,
"needs_conversion": false,
"metadata": {
"duration_seconds": 120.5,
"width": 1920,
"height": 1080,
"fps": 30.0,
"codec": "mp4v",
"bitrate": 5000000,
"aspect_ratio": 1.777
}
}
],
"total_count": 1
}
```
### Get Video Information
```http
GET /videos/{file_id}
```
**Response**: `VideoInfoResponse` with detailed video information including metadata.
### Stream Video
```http
GET /videos/{file_id}/stream
```
**Headers:**
- `Range: bytes=0-1023` (optional): Request specific byte range for seeking
**Features:**
- **HTTP Range Requests**: Enables video seeking and progressive download
- **Partial Content**: Returns 206 status for range requests
- **Format Conversion**: Automatic AVI to MP4 conversion for web compatibility
- **Intelligent Caching**: Optimized performance with byte-range caching
- **CORS Enabled**: Ready for web browser integration
**Response Headers:**
- `Accept-Ranges: bytes`
- `Content-Length: {size}`
- `Content-Range: bytes {start}-{end}/{total}` (for range requests)
- `Cache-Control: public, max-age=3600`
### Get Video Thumbnail
```http
GET /videos/{file_id}/thumbnail?timestamp=5.0&width=320&height=240
```
**Query Parameters:**
- `timestamp` (optional): Time position in seconds (default: 1.0)
- `width` (optional): Thumbnail width in pixels (default: 320)
- `height` (optional): Thumbnail height in pixels (default: 240)
**Response**: JPEG image data with caching headers
### Get Streaming Information
```http
GET /videos/{file_id}/info
```
**Response**: `StreamingInfoResponse`
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"file_size_bytes": 52428800,
"content_type": "video/mp4",
"supports_range_requests": true,
"chunk_size_bytes": 262144
}
```
### Video Validation
```http
POST /videos/{file_id}/validate
```
**Response**: Validation status and accessibility check
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"is_valid": true
}
```
### Cache Management
```http
POST /videos/{file_id}/cache/invalidate
```
**Response**: Cache invalidation status
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"cache_invalidated": true
}
```
### Admin: Cache Cleanup
```http
POST /admin/videos/cache/cleanup?max_size_mb=100
```
**Response**: Cache cleanup results
```json
{
"cache_cleaned": true,
"entries_removed": 15,
"max_size_mb": 100
}
```
**Video Streaming Features**:
- 🎥 **Multiple Formats**: Native MP4 support with AVI conversion
- 📱 **Web Compatible**: Direct integration with HTML5 video elements
- **High Performance**: Intelligent caching and adaptive chunking
- 🖼️ **Thumbnail Generation**: Extract preview images at any timestamp
- 🔄 **Range Requests**: Efficient seeking and progressive download
## 🌐 WebSocket Real-time Updates
### Connect to WebSocket
```javascript
const ws = new WebSocket('ws://localhost:8000/ws');
ws.onmessage = (event) => {
const update = JSON.parse(event.data);
console.log('Real-time update:', update);
};
```
**WebSocket Message Types**:
- `system_status`: System status changes
- `camera_status`: Camera status updates
- `recording_started`: Recording start events
- `recording_stopped`: Recording stop events
- `mqtt_message`: MQTT message received
- `auto_recording_event`: Auto-recording status changes
**Example WebSocket Message**:
```json
{
"type": "recording_started",
"data": {
"camera_name": "camera1",
"filename": "20240115_103000_auto_recording.avi",
"timestamp": "2024-01-15T10:30:00Z"
},
"timestamp": "2024-01-15T10:30:00Z"
}
```
## 🚀 Quick Start Examples
### Basic System Monitoring
```bash
# Check system health
curl http://localhost:8000/health
# Get overall system status
curl http://localhost:8000/system/status
# Get all camera statuses
curl http://localhost:8000/cameras
```
### Manual Recording Control
```bash
# Start recording with default settings
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"filename": "manual_test.avi"}'
# Start recording with custom camera settings
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{
"filename": "high_quality.avi",
"exposure_ms": 2.0,
"gain": 4.0,
"fps": 5.0
}'
# Stop recording
curl -X POST http://localhost:8000/cameras/camera1/stop-recording
```
### Auto-Recording Management
```bash
# Enable auto-recording for camera1
curl -X POST http://localhost:8000/cameras/camera1/auto-recording/enable
# Check auto-recording status
curl http://localhost:8000/auto-recording/status
# Disable auto-recording for camera1
curl -X POST http://localhost:8000/cameras/camera1/auto-recording/disable
```
### Video Streaming Operations
```bash
# List all videos
curl http://localhost:8000/videos/
# List videos from specific camera with metadata
curl "http://localhost:8000/videos/?camera_name=camera1&include_metadata=true"
# Get video information
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi
# Get video thumbnail
curl "http://localhost:8000/videos/camera1_recording_20250804_143022.avi/thumbnail?timestamp=5.0&width=320&height=240" \
--output thumbnail.jpg
# Get streaming info
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi/info
# Stream video with range request
curl -H "Range: bytes=0-1023" \
http://localhost:8000/videos/camera1_recording_20250804_143022.avi/stream
# Validate video file
curl -X POST http://localhost:8000/videos/camera1_recording_20250804_143022.avi/validate
# Clean up video cache (admin)
curl -X POST "http://localhost:8000/admin/videos/cache/cleanup?max_size_mb=100"
```
### Camera Configuration
```bash
# Get current camera configuration
curl http://localhost:8000/cameras/camera1/config
# Update camera settings (real-time)
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"exposure_ms": 1.5,
"gain": 3.0,
"sharpness": 130,
"contrast": 120
}'
```
## 📈 Recent API Changes & Enhancements
### ✨ New in Latest Version
#### 1. Enhanced Recording API
- **Dynamic camera settings**: Set exposure, gain, and FPS per recording
- **Automatic datetime prefixes**: All filenames get timestamp prefixes
- **Backward compatibility**: Existing API calls work unchanged
#### 2. Auto-Recording Feature
- **Per-camera control**: Enable/disable auto-recording individually
- **MQTT integration**: Automatic recording based on machine states
- **Retry logic**: Failed recordings are automatically retried
- **Status tracking**: Monitor auto-recording attempts and failures
#### 3. Advanced Camera Configuration
- **Real-time settings**: Update exposure, gain, image quality without restart
- **Image enhancement**: Sharpness, contrast, saturation, gamma controls
- **Noise reduction**: Configurable noise filtering and 3D denoising
- **HDR support**: High Dynamic Range imaging capabilities
#### 4. Live Streaming
- **MJPEG streaming**: Real-time camera preview
- **Concurrent operation**: Stream while recording simultaneously
- **Web-compatible**: Direct integration with React/HTML video elements
#### 5. Enhanced Monitoring
- **MQTT event history**: Track machine state changes over time
- **Storage statistics**: Monitor disk usage and file counts
- **WebSocket updates**: Real-time system status notifications
#### 6. Video Streaming Module
- **HTTP Range Requests**: Efficient video seeking and progressive download
- **Thumbnail Generation**: Extract preview images from videos at any timestamp
- **Format Conversion**: Automatic AVI to MP4 conversion for web compatibility
- **Intelligent Caching**: Byte-range caching for optimal streaming performance
- **Admin Tools**: Cache management and video validation endpoints
### 🔄 Migration Notes
#### From Previous Versions
1. **Recording API**: All existing calls work, but now return filenames with datetime prefixes
2. **Configuration**: New camera settings are optional and backward compatible
3. **Auto-recording**: New feature, requires enabling in `config.json` and per camera
#### Configuration Updates
```json
{
"cameras": [
{
"name": "camera1",
"auto_start_recording_enabled": true, // NEW: Enable auto-recording
"sharpness": 120, // NEW: Image quality settings
"contrast": 110,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": true,
"hdr_enabled": false
}
],
"system": {
"auto_recording_enabled": true // NEW: Global auto-recording toggle
}
}
```
## 🔗 Related Documentation
- [📷 Camera Configuration API Guide](api/CAMERA_CONFIG_API.md) - Detailed camera settings
- [🤖 Auto-Recording Feature Guide](features/AUTO_RECORDING_FEATURE_GUIDE.md) - React integration
- [📺 Streaming Guide](guides/STREAMING_GUIDE.md) - Live video streaming
- [🎬 Video Streaming Guide](VIDEO_STREAMING.md) - Video playback and streaming
- [🤖 AI Agent Video Integration Guide](AI_AGENT_VIDEO_INTEGRATION_GUIDE.md) - Complete integration guide for AI agents
- [🔧 Camera Recovery Guide](guides/CAMERA_RECOVERY_GUIDE.md) - Troubleshooting
- [📡 MQTT Logging Guide](guides/MQTT_LOGGING_GUIDE.md) - MQTT configuration
## 📞 Support & Integration
### API Base URL
- **Development**: `http://localhost:8000`
- **Production**: Configure in `config.json` under `system.api_host` and `system.api_port`
### Error Handling
All endpoints return standard HTTP status codes:
- `200`: Success
- `206`: Partial Content (for video range requests)
- `400`: Bad Request (invalid parameters)
- `404`: Resource not found (camera, file, video, etc.)
- `416`: Range Not Satisfiable (invalid video range request)
- `500`: Internal server error
- `503`: Service unavailable (camera manager, MQTT, etc.)
**Video Streaming Specific Errors:**
- `404`: Video file not found or not streamable
- `416`: Invalid range request (malformed Range header)
- `500`: Failed to read video data or generate thumbnail
### Rate Limiting
- No rate limiting currently implemented
- WebSocket connections are limited to reasonable concurrent connections
### CORS Support
- CORS is enabled for web dashboard integration
- Configure allowed origins in the API server settings
```
```

View File

@@ -1,195 +0,0 @@
# 🚀 USDA Vision Camera System - API Quick Reference
Quick reference for the most commonly used API endpoints. For complete documentation, see [API_DOCUMENTATION.md](API_DOCUMENTATION.md).
## 🔧 System Status
```bash
# Health check
curl http://localhost:8000/health
# System overview
curl http://localhost:8000/system/status
# All cameras
curl http://localhost:8000/cameras
# All machines
curl http://localhost:8000/machines
```
## 🎥 Recording Control
### Start Recording (Basic)
```bash
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"filename": "test.avi"}'
```
### Start Recording (With Settings)
```bash
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{
"filename": "high_quality.avi",
"exposure_ms": 2.0,
"gain": 4.0,
"fps": 5.0
}'
```
### Stop Recording
```bash
curl -X POST http://localhost:8000/cameras/camera1/stop-recording
```
## 🤖 Auto-Recording
```bash
# Enable auto-recording
curl -X POST http://localhost:8000/cameras/camera1/auto-recording/enable
# Disable auto-recording
curl -X POST http://localhost:8000/cameras/camera1/auto-recording/disable
# Check auto-recording status
curl http://localhost:8000/auto-recording/status
```
## 🎛️ Camera Configuration
```bash
# Get camera config
curl http://localhost:8000/cameras/camera1/config
# Update camera settings
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"exposure_ms": 1.5,
"gain": 3.0,
"sharpness": 130
}'
```
## 📺 Live Streaming
```bash
# Start streaming
curl -X POST http://localhost:8000/cameras/camera1/start-stream
# Get MJPEG stream (use in browser/video element)
# http://localhost:8000/cameras/camera1/stream
# Stop streaming
curl -X POST http://localhost:8000/cameras/camera1/stop-stream
```
## 🔄 Camera Recovery
```bash
# Test connection
curl -X POST http://localhost:8000/cameras/camera1/test-connection
# Reconnect camera
curl -X POST http://localhost:8000/cameras/camera1/reconnect
# Full reset
curl -X POST http://localhost:8000/cameras/camera1/full-reset
```
## 💾 Storage Management
```bash
# Storage statistics
curl http://localhost:8000/storage/stats
# List files
curl -X POST http://localhost:8000/storage/files \
-H "Content-Type: application/json" \
-d '{"camera_name": "camera1", "limit": 10}'
# Cleanup old files
curl -X POST http://localhost:8000/storage/cleanup \
-H "Content-Type: application/json" \
-d '{"max_age_days": 30}'
```
## 📡 MQTT Monitoring
```bash
# MQTT status
curl http://localhost:8000/mqtt/status
# Recent MQTT events
curl http://localhost:8000/mqtt/events?limit=10
```
## 🌐 WebSocket Connection
```javascript
// Connect to real-time updates
const ws = new WebSocket('ws://localhost:8000/ws');
ws.onmessage = (event) => {
const update = JSON.parse(event.data);
console.log('Update:', update);
};
```
## 📊 Response Examples
### System Status Response
```json
{
"system_started": true,
"mqtt_connected": true,
"cameras": {
"camera1": {
"name": "camera1",
"status": "ACTIVE",
"is_recording": false,
"auto_recording_enabled": true
}
},
"active_recordings": 0,
"total_recordings": 15
}
```
### Recording Start Response
```json
{
"success": true,
"message": "Recording started for camera1",
"filename": "20240115_103000_test.avi"
}
```
### Camera Status Response
```json
{
"name": "camera1",
"status": "ACTIVE",
"is_recording": false,
"auto_recording_enabled": true,
"auto_recording_active": false,
"auto_recording_failure_count": 0
}
```
## 🔗 Related Documentation
- [📚 Complete API Documentation](API_DOCUMENTATION.md)
- [🎛️ Camera Configuration Guide](api/CAMERA_CONFIG_API.md)
- [🤖 Auto-Recording Feature Guide](features/AUTO_RECORDING_FEATURE_GUIDE.md)
- [📺 Streaming Guide](guides/STREAMING_GUIDE.md)
## 💡 Tips
- All filenames automatically get datetime prefixes: `YYYYMMDD_HHMMSS_`
- Camera settings can be updated in real-time during recording
- Auto-recording is controlled per camera and globally
- WebSocket provides real-time updates for dashboard integration
- CORS is enabled for web application integration

View File

@@ -1,217 +0,0 @@
# 📋 Current System Configuration Reference
## Overview
This document shows the exact current configuration structure of the USDA Vision Camera System, including all fields and their current values.
## 🔧 Complete Configuration Structure
### System Configuration (`config.json`)
```json
{
"mqtt": {
"broker_host": "192.168.1.110",
"broker_port": 1883,
"username": null,
"password": null,
"topics": {
"vibratory_conveyor": "vision/vibratory_conveyor/state",
"blower_separator": "vision/blower_separator/state"
}
},
"storage": {
"base_path": "/storage",
"max_file_size_mb": 1000,
"max_recording_duration_minutes": 60,
"cleanup_older_than_days": 30
},
"system": {
"camera_check_interval_seconds": 2,
"log_level": "DEBUG",
"log_file": "usda_vision_system.log",
"api_host": "0.0.0.0",
"api_port": 8000,
"enable_api": true,
"timezone": "America/New_York",
"auto_recording_enabled": true
},
"cameras": [
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 0.3,
"gain": 4.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"sharpness": 0,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 0,
"wb_red_gain": 0.94,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.87,
"anti_flicker_enabled": false,
"light_frequency": 0,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 2
},
{
"name": "camera2",
"machine_topic": "vibratory_conveyor",
"storage_path": "/storage/camera2",
"exposure_ms": 0.2,
"gain": 2.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"sharpness": 0,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 0,
"wb_red_gain": 1.01,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.87,
"anti_flicker_enabled": false,
"light_frequency": 0,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
]
}
```
## 📊 Configuration Field Reference
### MQTT Settings
| Field | Value | Description |
|-------|-------|-------------|
| `broker_host` | `"192.168.1.110"` | MQTT broker IP address |
| `broker_port` | `1883` | MQTT broker port |
| `username` | `null` | MQTT authentication (not used) |
| `password` | `null` | MQTT authentication (not used) |
### MQTT Topics
| Machine | Topic | Camera |
|---------|-------|--------|
| Vibratory Conveyor | `vision/vibratory_conveyor/state` | camera2 |
| Blower Separator | `vision/blower_separator/state` | camera1 |
### Storage Settings
| Field | Value | Description |
|-------|-------|-------------|
| `base_path` | `"/storage"` | Root storage directory |
| `max_file_size_mb` | `1000` | Maximum file size (1GB) |
| `max_recording_duration_minutes` | `60` | Maximum recording duration |
| `cleanup_older_than_days` | `30` | Auto-cleanup threshold |
### System Settings
| Field | Value | Description |
|-------|-------|-------------|
| `camera_check_interval_seconds` | `2` | Camera health check interval |
| `log_level` | `"DEBUG"` | Logging verbosity |
| `api_host` | `"0.0.0.0"` | API server bind address |
| `api_port` | `8000` | API server port |
| `timezone` | `"America/New_York"` | System timezone |
| `auto_recording_enabled` | `true` | Enable MQTT-triggered recording |
## 🎥 Camera Configuration Details
### Camera 1 (Blower Separator)
| Setting | Value | Description |
|---------|-------|-------------|
| **Basic Settings** | | |
| `name` | `"camera1"` | Camera identifier |
| `machine_topic` | `"blower_separator"` | MQTT topic to monitor |
| `storage_path` | `"/storage/camera1"` | Video storage location |
| `exposure_ms` | `0.3` | Exposure time (milliseconds) |
| `gain` | `4.0` | Camera gain multiplier |
| `target_fps` | `0` | Target FPS (0 = unlimited) |
| **Video Recording** | | |
| `video_format` | `"mp4"` | Video file format |
| `video_codec` | `"mp4v"` | Video codec (MPEG-4) |
| `video_quality` | `95` | Video quality (0-100) |
| **Auto Recording** | | |
| `auto_start_recording_enabled` | `true` | Enable auto-recording |
| `auto_recording_max_retries` | `3` | Max retry attempts |
| `auto_recording_retry_delay_seconds` | `2` | Delay between retries |
| **Image Quality** | | |
| `sharpness` | `0` | Sharpness adjustment |
| `contrast` | `100` | Contrast level |
| `saturation` | `100` | Color saturation |
| `gamma` | `100` | Gamma correction |
| **White Balance** | | |
| `auto_white_balance` | `false` | Auto white balance disabled |
| `wb_red_gain` | `0.94` | Red channel gain |
| `wb_green_gain` | `1.0` | Green channel gain |
| `wb_blue_gain` | `0.87` | Blue channel gain |
| **Advanced** | | |
| `bit_depth` | `8` | Color bit depth |
| `hdr_enabled` | `false` | HDR disabled |
| `hdr_gain_mode` | `2` | HDR gain mode |
### Camera 2 (Vibratory Conveyor)
| Setting | Value | Difference from Camera 1 |
|---------|-------|--------------------------|
| `name` | `"camera2"` | Different identifier |
| `machine_topic` | `"vibratory_conveyor"` | Different MQTT topic |
| `storage_path` | `"/storage/camera2"` | Different storage path |
| `exposure_ms` | `0.2` | Faster exposure (0.2 vs 0.3) |
| `gain` | `2.0` | Lower gain (2.0 vs 4.0) |
| `wb_red_gain` | `1.01` | Different red balance (1.01 vs 0.94) |
| `hdr_gain_mode` | `0` | Different HDR mode (0 vs 2) |
*All other settings are identical to Camera 1*
## 🔄 Recent Changes
### MP4 Format Update
- **Added**: `video_format`, `video_codec`, `video_quality` fields
- **Changed**: Default recording format from AVI to MP4
- **Impact**: Requires service restart to take effect
### Current Status
- ✅ Configuration updated with MP4 settings
- ⚠️ Service restart required to apply changes
- 📁 Existing AVI files remain accessible
## 📝 Notes
1. **Target FPS = 0**: Both cameras use unlimited frame rate for maximum capture speed
2. **Auto Recording**: Both cameras automatically start recording when their respective machines turn on
3. **White Balance**: Manual white balance settings optimized for each camera's environment
4. **Storage**: Each camera has its own dedicated storage directory
5. **Video Quality**: Set to 95/100 for high-quality recordings with MP4 compression benefits
## 🔧 Configuration Management
To modify these settings:
1. Edit `config.json` file
2. Restart the camera service: `sudo ./start_system.sh`
3. Verify changes via API: `GET /cameras/{camera_name}/config`
For real-time settings (exposure, gain, fps), use the API without restart:
```bash
PUT /cameras/{camera_name}/config
```

View File

@@ -1,211 +0,0 @@
# 🎥 MP4 Video Format Update - Frontend Integration Guide
## Overview
The USDA Vision Camera System has been updated to record videos in **MP4 format** instead of AVI format for better streaming compatibility and smaller file sizes.
## 🔄 What Changed
### Video Format
- **Before**: AVI files with XVID codec (`.avi` extension)
- **After**: MP4 files with MPEG-4 codec (`.mp4` extension)
### File Extensions
- All new video recordings now use `.mp4` extension
- Existing `.avi` files remain accessible and functional
- File size reduction: ~40% smaller than equivalent AVI files
### API Response Updates
New fields added to camera configuration responses:
```json
{
"video_format": "mp4", // File format: "mp4" or "avi"
"video_codec": "mp4v", // Video codec: "mp4v", "XVID", "MJPG"
"video_quality": 95 // Quality: 0-100 (higher = better)
}
```
## 🌐 Frontend Impact
### 1. Video Player Compatibility
**✅ Better Browser Support**
- MP4 format has native support in all modern browsers
- No need for additional codecs or plugins
- Better mobile device compatibility (iOS/Android)
### 2. File Handling Updates
**File Extension Handling**
```javascript
// Update file extension checks
const isVideoFile = (filename) => {
return filename.endsWith('.mp4') || filename.endsWith('.avi');
};
// Video MIME type detection
const getVideoMimeType = (filename) => {
if (filename.endsWith('.mp4')) return 'video/mp4';
if (filename.endsWith('.avi')) return 'video/x-msvideo';
return 'video/mp4'; // default
};
```
### 3. Video Streaming
**Improved Streaming Performance**
```javascript
// MP4 files can be streamed directly without conversion
const videoUrl = `/api/videos/${videoId}/stream`;
// For HTML5 video element
<video controls>
<source src={videoUrl} type="video/mp4" />
Your browser does not support the video tag.
</video>
```
### 4. File Size Display
**Updated Size Expectations**
- MP4 files are ~40% smaller than equivalent AVI files
- Update any file size warnings or storage calculations
- Better compression means faster downloads and uploads
## 📡 API Changes
### Camera Configuration Endpoint
**GET** `/cameras/{camera_name}/config`
**New Response Fields:**
```json
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 0.3,
"gain": 4.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
// ... other existing fields
}
```
### Video Listing Endpoints
**File Extension Updates**
- Video files in responses will now have `.mp4` extensions
- Existing `.avi` files will still appear in listings
- Filter by both extensions when needed
## 🔧 Configuration Options
### Video Format Settings
```json
{
"video_format": "mp4", // Options: "mp4", "avi"
"video_codec": "mp4v", // Options: "mp4v", "XVID", "MJPG"
"video_quality": 95 // Range: 0-100 (higher = better quality)
}
```
### Recommended Settings
- **Production**: `"mp4"` format, `"mp4v"` codec, `95` quality
- **Storage Optimized**: `"mp4"` format, `"mp4v"` codec, `85` quality
- **Legacy Mode**: `"avi"` format, `"XVID"` codec, `95` quality
## 🎯 Frontend Implementation Checklist
### ✅ Video Player Updates
- [ ] Verify HTML5 video player works with MP4 files
- [ ] Update video MIME type handling
- [ ] Test streaming performance with new format
### ✅ File Management
- [ ] Update file extension filters to include `.mp4`
- [ ] Modify file type detection logic
- [ ] Update download/upload handling for MP4 files
### ✅ UI/UX Updates
- [ ] Update file size expectations in UI
- [ ] Modify any format-specific icons or indicators
- [ ] Update help text or tooltips mentioning video formats
### ✅ Configuration Interface
- [ ] Add video format settings to camera config UI
- [ ] Include video quality slider/selector
- [ ] Add restart warning for video format changes
### ✅ Testing
- [ ] Test video playback with new MP4 files
- [ ] Verify backward compatibility with existing AVI files
- [ ] Test streaming performance and loading times
## 🔄 Backward Compatibility
### Existing AVI Files
- All existing `.avi` files remain fully functional
- No conversion or migration required
- Video player should handle both formats
### API Compatibility
- All existing API endpoints continue to work
- New fields are additive (won't break existing code)
- Default values provided for new configuration fields
## 📊 Performance Benefits
### File Size Reduction
```
Example 5-minute recording at 1280x1024:
- AVI/XVID: ~180 MB
- MP4/MPEG-4: ~108 MB (40% reduction)
```
### Streaming Improvements
- Faster initial load times
- Better progressive download support
- Reduced bandwidth usage
- Native browser optimization
### Storage Efficiency
- More recordings fit in same storage space
- Faster backup and transfer operations
- Reduced storage costs over time
## 🚨 Important Notes
### Restart Required
- Video format changes require camera service restart
- Mark video format settings as "restart required" in UI
- Provide clear user feedback about restart necessity
### Browser Compatibility
- MP4 format supported in all modern browsers
- Better mobile device support than AVI
- No additional plugins or codecs needed
### Quality Assurance
- Video quality maintained at 95/100 setting
- No visual degradation compared to AVI
- High bitrate ensures professional quality
## 🔗 Related Documentation
- [API Documentation](API_DOCUMENTATION.md) - Complete API reference
- [Camera Configuration API](api/CAMERA_CONFIG_API.md) - Detailed config options
- [Video Streaming Guide](VIDEO_STREAMING.md) - Streaming implementation
- [MP4 Conversion Summary](../MP4_CONVERSION_SUMMARY.md) - Technical details
## 📞 Support
If you encounter any issues with the MP4 format update:
1. **Video Playback Issues**: Check browser console for codec errors
2. **File Size Concerns**: Verify quality settings in camera config
3. **Streaming Problems**: Test with both MP4 and AVI files for comparison
4. **API Integration**: Refer to updated API documentation
The MP4 format provides better web compatibility and performance while maintaining the same high video quality required for the USDA vision system.

View File

@@ -1,212 +0,0 @@
# 🎉 USDA Vision Camera System - PROJECT COMPLETE!
## ✅ Final Status: READY FOR PRODUCTION
The USDA Vision Camera System has been successfully implemented, tested, and documented. All requirements have been met and the system is production-ready.
## 📋 Completed Requirements
### ✅ Core Functionality
- **MQTT Integration**: Dual topic listening for machine states
- **Automatic Recording**: Camera recording triggered by machine on/off states
- **GigE Camera Support**: Full integration with camera SDK library
- **Multi-threading**: Concurrent MQTT + camera monitoring + recording
- **File Management**: Timestamp-based naming in organized directories
### ✅ Advanced Features
- **REST API**: Complete FastAPI server with all endpoints
- **WebSocket Support**: Real-time updates for dashboard integration
- **Time Synchronization**: Atlanta, Georgia timezone with NTP sync
- **Storage Management**: File indexing, cleanup, and statistics
- **Comprehensive Logging**: Rotating logs with error tracking
- **Configuration System**: JSON-based configuration management
### ✅ Documentation & Testing
- **Complete README**: Installation, usage, API docs, troubleshooting
- **Test Suite**: Comprehensive system testing (`test_system.py`)
- **Time Verification**: Timezone and sync testing (`check_time.py`)
- **Startup Scripts**: Easy deployment with `start_system.sh`
- **Clean Repository**: Organized structure with proper .gitignore
## 🏗️ Final Project Structure
```
USDA-Vision-Cameras/
├── README.md # Complete documentation
├── main.py # System entry point
├── config.json # System configuration
├── requirements.txt # Python dependencies
├── pyproject.toml # UV package configuration
├── .gitignore # Git ignore rules
├── start_system.sh # Startup script
├── setup_timezone.sh # Time sync setup
├── test_system.py # System test suite
├── check_time.py # Time verification
├── test_timezone.py # Timezone testing
├── usda_vision_system/ # Main application
│ ├── core/ # Core functionality
│ ├── mqtt/ # MQTT integration
│ ├── camera/ # Camera management
│ ├── storage/ # File management
│ ├── api/ # REST API server
│ └── main.py # Application coordinator
├── camera_sdk/ # GigE camera SDK library
├── demos/ # Demo and example code
│ ├── cv_grab*.py # Camera SDK usage examples
│ └── mqtt_*.py # MQTT demo scripts
├── storage/ # Recording storage
│ ├── camera1/ # Camera 1 recordings
│ └── camera2/ # Camera 2 recordings
├── tests/ # Test files and legacy tests
├── notebooks/ # Jupyter notebooks
└── docs/ # Documentation files
```
## 🚀 How to Deploy
### 1. Clone and Setup
```bash
git clone https://github.com/your-username/USDA-Vision-Cameras.git
cd USDA-Vision-Cameras
uv sync
```
### 2. Configure System
```bash
# Edit config.json for your environment
# Set MQTT broker, camera settings, storage paths
```
### 3. Setup Time Sync
```bash
./setup_timezone.sh
```
### 4. Test System
```bash
python test_system.py
```
### 5. Start System
```bash
./start_system.sh
```
## 🌐 API Integration
### Dashboard Integration
```javascript
// React component example
const systemStatus = await fetch('http://localhost:8000/system/status');
const cameras = await fetch('http://localhost:8000/cameras');
// WebSocket for real-time updates
const ws = new WebSocket('ws://localhost:8000/ws');
ws.onmessage = (event) => {
const update = JSON.parse(event.data);
// Handle real-time system updates
};
```
### Manual Control
```bash
# Start recording manually
curl -X POST http://localhost:8000/cameras/camera1/start-recording
# Stop recording manually
curl -X POST http://localhost:8000/cameras/camera1/stop-recording
# Get system status
curl http://localhost:8000/system/status
```
## 📊 System Capabilities
### Discovered Hardware
- **2 GigE Cameras**: Blower-Yield-Cam, Cracker-Cam
- **Network Ready**: Cameras accessible at 192.168.1.165, 192.168.1.167
- **MQTT Ready**: Configured for broker at 192.168.1.110
### Recording Features
- **Automatic Start/Stop**: Based on MQTT machine states
- **Timezone Aware**: Atlanta time timestamps (EST/EDT)
- **Organized Storage**: Separate directories per camera
- **File Naming**: `camera1_recording_20250725_213000.avi`
- **Manual Control**: API endpoints for manual recording
### Monitoring Features
- **Real-time Status**: Camera and machine state monitoring
- **Health Checks**: Automatic system health verification
- **Performance Tracking**: Recording metrics and system stats
- **Error Handling**: Comprehensive error tracking and recovery
## 🔧 Maintenance
### Regular Tasks
- **Log Monitoring**: Check `usda_vision_system.log`
- **Storage Cleanup**: Automatic cleanup of old recordings
- **Time Sync**: Automatic NTP synchronization
- **Health Checks**: Built-in system monitoring
### Troubleshooting
- **Test Suite**: `python test_system.py`
- **Time Check**: `python check_time.py`
- **API Health**: `curl http://localhost:8000/health`
- **Debug Mode**: `python main.py --log-level DEBUG`
## 🎯 Production Readiness
### ✅ All Tests Passing
- System initialization: ✅
- Camera discovery: ✅ (2 cameras found)
- MQTT configuration: ✅
- Storage setup: ✅
- Time synchronization: ✅
- API endpoints: ✅
### ✅ Documentation Complete
- Installation guide: ✅
- Configuration reference: ✅
- API documentation: ✅
- Troubleshooting guide: ✅
- Integration examples: ✅
### ✅ Production Features
- Error handling: ✅
- Logging system: ✅
- Time synchronization: ✅
- Storage management: ✅
- API security: ✅
- Performance monitoring: ✅
## 🚀 Next Steps
The system is now ready for:
1. **Production Deployment**: Deploy on target hardware
2. **Dashboard Integration**: Connect to React + Supabase dashboard
3. **MQTT Configuration**: Connect to production MQTT broker
4. **Camera Calibration**: Fine-tune camera settings for production
5. **Monitoring Setup**: Configure production monitoring and alerts
## 📞 Support
For ongoing support:
- **Documentation**: Complete README.md with troubleshooting
- **Test Suite**: Comprehensive diagnostic tools
- **Logging**: Detailed system logs for debugging
- **API Health**: Built-in health check endpoints
---
**🎊 PROJECT STATUS: COMPLETE AND PRODUCTION-READY! 🎊**
The USDA Vision Camera System is fully implemented, tested, and documented. All original requirements have been met, and the system is ready for production deployment with your React dashboard integration.
**Key Achievements:**
- ✅ Dual MQTT topic monitoring
- ✅ Automatic camera recording
- ✅ Atlanta timezone synchronization
- ✅ Complete REST API
- ✅ Comprehensive documentation
- ✅ Production-ready deployment

View File

@@ -1,276 +0,0 @@
# 🚀 React Frontend Integration Guide - MP4 Update
## 🎯 Quick Summary for React Team
The camera system now records in **MP4 format** instead of AVI. This provides better web compatibility and smaller file sizes.
## 🔄 What You Need to Update
### 1. File Extension Handling
```javascript
// OLD: Only checked for .avi
const isVideoFile = (filename) => filename.endsWith('.avi');
// NEW: Check for both formats
const isVideoFile = (filename) => {
return filename.endsWith('.mp4') || filename.endsWith('.avi');
};
// Video MIME types
const getVideoMimeType = (filename) => {
if (filename.endsWith('.mp4')) return 'video/mp4';
if (filename.endsWith('.avi')) return 'video/x-msvideo';
return 'video/mp4'; // default for new files
};
```
### 2. Video Player Component
```jsx
// MP4 files work better with HTML5 video
const VideoPlayer = ({ videoUrl, filename }) => {
const mimeType = getVideoMimeType(filename);
return (
<video controls width="100%" height="auto">
<source src={videoUrl} type={mimeType} />
Your browser does not support the video tag.
</video>
);
};
```
### 3. Camera Configuration Interface
Add these new fields to your camera config forms:
```jsx
const CameraConfigForm = () => {
const [config, setConfig] = useState({
// ... existing fields
video_format: 'mp4', // 'mp4' or 'avi'
video_codec: 'mp4v', // 'mp4v', 'XVID', 'MJPG'
    video_quality: 95 // 50-100 (values below 50 are rejected by validateVideoSettings)
});
return (
<form>
{/* ... existing fields */}
<div className="video-settings">
<h3>Video Recording Settings</h3>
<select
value={config.video_format}
onChange={(e) => setConfig({...config, video_format: e.target.value})}
>
<option value="mp4">MP4 (Recommended)</option>
<option value="avi">AVI (Legacy)</option>
</select>
<select
value={config.video_codec}
onChange={(e) => setConfig({...config, video_codec: e.target.value})}
>
<option value="mp4v">MPEG-4 (mp4v)</option>
<option value="XVID">Xvid</option>
<option value="MJPG">Motion JPEG</option>
</select>
<input
type="range"
min="50"
max="100"
value={config.video_quality}
onChange={(e) => setConfig({...config, video_quality: parseInt(e.target.value)})}
/>
<label>Quality: {config.video_quality}%</label>
<div className="warning">
Video format changes require camera restart
</div>
</div>
</form>
);
};
```
## 📡 API Response Changes
### Camera Configuration Response
```json
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 0.3,
"gain": 4.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
// ... other existing fields
}
```
### Video File Listings
```json
{
"videos": [
{
"file_id": "camera1_recording_20250804_143022.mp4",
"filename": "camera1_recording_20250804_143022.mp4",
"format": "mp4",
"file_size_bytes": 31457280,
"created_at": "2025-08-04T14:30:22"
}
]
}
```
## 🎨 UI/UX Improvements
### File Size Display
```javascript
// MP4 files are ~40% smaller
const formatFileSize = (bytes) => {
const mb = bytes / (1024 * 1024);
return `${mb.toFixed(1)} MB`;
};
// Show format in file listings
const FileListItem = ({ video }) => (
<div className="file-item">
<span className="filename">{video.filename}</span>
<span className={`format ${video.format}`}>
{video.format.toUpperCase()}
</span>
<span className="size">{formatFileSize(video.file_size_bytes)}</span>
</div>
);
```
### Format Indicators
```css
.format.mp4 {
background: #4CAF50;
color: white;
padding: 2px 6px;
border-radius: 3px;
font-size: 0.8em;
}
.format.avi {
background: #FF9800;
color: white;
padding: 2px 6px;
border-radius: 3px;
font-size: 0.8em;
}
```
## ⚡ Performance Benefits
### Streaming Improvements
- **Faster Loading**: MP4 files start playing sooner
- **Better Seeking**: More responsive video scrubbing
- **Mobile Friendly**: Better iOS/Android compatibility
- **Bandwidth Savings**: 40% smaller files = faster transfers
### Implementation Tips
```javascript
// Preload video metadata for better UX
const VideoThumbnail = ({ videoUrl }) => (
<video
preload="metadata"
    poster={`${videoUrl}?t=1`} // NOTE: <video poster> expects an image URL — prefer the /videos/{file_id}/thumbnail endpoint for a real preview frame
onLoadedMetadata={(e) => {
console.log('Duration:', e.target.duration);
}}
>
<source src={videoUrl} type="video/mp4" />
</video>
);
```
## 🔧 Configuration Management
### Restart Warning Component
```jsx
const RestartWarning = ({ show }) => {
if (!show) return null;
return (
<div className="alert alert-warning">
<strong>⚠️ Restart Required</strong>
<p>Video format changes require a camera service restart to take effect.</p>
<button onClick={handleRestart}>Restart Camera Service</button>
</div>
);
};
```
### Settings Validation
```javascript
const validateVideoSettings = (settings) => {
const errors = {};
if (!['mp4', 'avi'].includes(settings.video_format)) {
errors.video_format = 'Must be mp4 or avi';
}
if (!['mp4v', 'XVID', 'MJPG'].includes(settings.video_codec)) {
errors.video_codec = 'Invalid codec';
}
if (settings.video_quality < 50 || settings.video_quality > 100) {
errors.video_quality = 'Quality must be between 50-100';
}
return errors;
};
```
## 📱 Mobile Considerations
### Responsive Video Player
```jsx
const ResponsiveVideoPlayer = ({ videoUrl, filename }) => (
<div className="video-container">
<video
controls
playsInline // Important for iOS
preload="metadata"
style={{ width: '100%', height: 'auto' }}
>
<source src={videoUrl} type={getVideoMimeType(filename)} />
<p>Your browser doesn't support HTML5 video.</p>
</video>
</div>
);
```
## 🧪 Testing Checklist
- [ ] Video playback works with new MP4 files
- [ ] File extension filtering includes both .mp4 and .avi
- [ ] Camera configuration UI shows video format options
- [ ] Restart warning appears for video format changes
- [ ] File size displays are updated for smaller MP4 files
- [ ] Mobile video playback works correctly
- [ ] Video streaming performance is improved
- [ ] Backward compatibility with existing AVI files
## 📞 Support
If you encounter issues:
1. **Video won't play**: Check browser console for codec errors
2. **File size unexpected**: Verify quality settings in camera config
3. **Streaming slow**: Compare MP4 vs AVI performance
4. **Mobile issues**: Ensure `playsInline` attribute is set
The MP4 update provides significant improvements in web compatibility and performance while maintaining full backward compatibility with existing AVI files.

View File

@@ -1,100 +0,0 @@
# USDA Vision Camera System - Documentation
This directory contains detailed documentation for the USDA Vision Camera System.
## Documentation Files
### 🚀 [API_DOCUMENTATION.md](API_DOCUMENTATION.md) **⭐ NEW**
**Complete API reference documentation** covering all endpoints, features, and recent enhancements:
- System status and health monitoring
- Camera management and configuration
- Recording control with dynamic settings
- Auto-recording management
- MQTT and machine status
- Storage and file management
- Camera recovery and diagnostics
- Live streaming capabilities
- WebSocket real-time updates
- Quick start examples and migration notes
### ⚡ [API_QUICK_REFERENCE.md](API_QUICK_REFERENCE.md) **⭐ NEW**
**Quick reference card** for the most commonly used API endpoints with curl examples and response formats.
### 📋 [PROJECT_COMPLETE.md](PROJECT_COMPLETE.md)
Complete project overview and final status documentation. Contains:
- Project completion status
- Final system architecture
- Deployment instructions
- Production readiness checklist
### 🎥 [MP4_FORMAT_UPDATE.md](MP4_FORMAT_UPDATE.md) **⭐ NEW**
**Frontend integration guide** for the MP4 video format update:
- Video format changes from AVI to MP4
- Frontend implementation checklist
- API response updates
- Performance benefits and browser compatibility
### 🚀 [REACT_INTEGRATION_GUIDE.md](REACT_INTEGRATION_GUIDE.md) **⭐ NEW**
**Quick reference for React developers** implementing the MP4 format changes:
- Code examples and components
- File handling updates
- Configuration interface
- Testing checklist
### 📋 [CURRENT_CONFIGURATION.md](CURRENT_CONFIGURATION.md) **⭐ NEW**
**Complete current system configuration reference**:
- Exact config.json structure with all current values
- Field-by-field documentation
- Camera-specific settings comparison
- MQTT topics and machine mappings
### 🎬 [VIDEO_STREAMING.md](VIDEO_STREAMING.md) **⭐ UPDATED**
**Complete video streaming module documentation**:
- Comprehensive API endpoint documentation
- Authentication and security information
- Error handling and troubleshooting
- Performance optimization guidelines
### 🤖 [AI_AGENT_VIDEO_INTEGRATION_GUIDE.md](AI_AGENT_VIDEO_INTEGRATION_GUIDE.md) **⭐ NEW**
**Complete integration guide for AI agents and external systems**:
- Step-by-step integration workflow
- Programming language examples (Python, JavaScript)
- Error handling and debugging strategies
- Performance optimization recommendations
### 🔧 [API_CHANGES_SUMMARY.md](API_CHANGES_SUMMARY.md)
Summary of API changes and enhancements made to the system.
### 📷 [CAMERA_RECOVERY_GUIDE.md](CAMERA_RECOVERY_GUIDE.md)
Guide for camera recovery procedures and troubleshooting camera-related issues.
### 📡 [MQTT_LOGGING_GUIDE.md](MQTT_LOGGING_GUIDE.md)
Comprehensive guide for MQTT logging configuration and troubleshooting.
## Main Documentation
The main system documentation is located in the root directory:
- **[../README.md](../README.md)** - Primary system documentation with installation, configuration, and usage instructions
## Additional Resources
### Demo Code
- **[../demos/](../demos/)** - Demo scripts and camera SDK examples
### Test Files
- **[../tests/](../tests/)** - Test scripts and legacy test files
### Jupyter Notebooks
- **[../notebooks/](../notebooks/)** - Interactive notebooks for system exploration and testing
## Quick Links
- [System Installation](../README.md#installation)
- [Configuration Guide](../README.md#configuration)
- [API Documentation](../README.md#api-reference)
- [Troubleshooting](../README.md#troubleshooting)
- [Camera SDK Examples](../demos/camera_sdk_examples/)
## Support
For technical support and questions, refer to the main [README.md](../README.md) troubleshooting section or check the system logs.

View File

@@ -1,601 +0,0 @@
# 🎬 Video Streaming Module
The USDA Vision Camera System now includes a modular video streaming system that provides YouTube-like video playback capabilities for your React web application.
## 🌟 Features
- **Progressive Streaming** - True chunked streaming for web browsers (no download required)
- **HTTP Range Request Support** - Enables seeking and progressive download with 206 Partial Content
- **Native MP4 Support** - Direct streaming of MP4 files optimized for web playback
- **Memory Efficient** - 8KB chunked delivery, no large file loading into memory
- **Browser Compatible** - Works with HTML5 `<video>` tag in all modern browsers
- **Intelligent Caching** - Optimized streaming performance with byte-range caching
- **Thumbnail Generation** - Extract preview images from videos
- **Modular Architecture** - Clean separation of concerns
- **No Authentication Required** - Open access for internal network use
- **CORS Enabled** - Ready for web browser integration
## 🏗️ Architecture
The video module follows clean architecture principles:
```
usda_vision_system/video/
├── domain/ # Business logic (pure Python)
├── infrastructure/ # External dependencies (OpenCV, FFmpeg)
├── application/ # Use cases and orchestration
├── presentation/ # HTTP controllers and API routes
└── integration.py # Dependency injection and composition
```
## 🚀 API Endpoints
### List Videos
```http
GET /videos/
```
**Query Parameters:**
- `camera_name` (optional): Filter by camera name
- `start_date` (optional): Filter videos created after this date (ISO format: 2025-08-04T14:30:22)
- `end_date` (optional): Filter videos created before this date (ISO format: 2025-08-04T14:30:22)
- `limit` (optional): Maximum results (default: 50, max: 1000)
- `include_metadata` (optional): Include video metadata (default: false)
**Example Request:**
```bash
curl "http://localhost:8000/videos/?camera_name=camera1&include_metadata=true&limit=10"
```
**Response:**
```json
{
"videos": [
{
"file_id": "camera1_auto_blower_separator_20250804_143022.mp4",
"camera_name": "camera1",
"filename": "camera1_auto_blower_separator_20250804_143022.mp4",
"file_size_bytes": 31457280,
"format": "mp4",
"status": "completed",
"created_at": "2025-08-04T14:30:22",
"start_time": "2025-08-04T14:30:22",
"end_time": "2025-08-04T14:32:22",
"machine_trigger": "blower_separator",
"is_streamable": true,
"needs_conversion": false,
"metadata": {
"duration_seconds": 120.5,
"width": 1920,
"height": 1080,
"fps": 30.0,
"codec": "mp4v",
"bitrate": 5000000,
"aspect_ratio": 1.777
}
}
],
"total_count": 1
}
```
### Stream Video
```http
GET /videos/{file_id}/stream
```
**Headers:**
- `Range: bytes=0-1023` (optional): Request specific byte range for seeking
**Example Requests:**
```bash
# Stream entire video (progressive streaming)
curl http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/stream
# Stream specific byte range (for seeking)
curl -H "Range: bytes=0-1023" \
http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/stream
```
**Response Headers:**
- `Accept-Ranges: bytes`
- `Content-Length: {size}`
- `Content-Range: bytes {start}-{end}/{total}` (for range requests)
- `Cache-Control: public, max-age=3600`
- `Content-Type: video/mp4`
**Streaming Implementation:**
-**Progressive Streaming**: Uses FastAPI `StreamingResponse` with 8KB chunks
-**HTTP Range Requests**: Returns 206 Partial Content for seeking
-**Memory Efficient**: No large file loading, streams directly from disk
-**Browser Compatible**: Works with HTML5 `<video>` tag playback
-**Chunked Delivery**: Optimal 8KB chunk size for smooth playback
-**CORS Enabled**: Ready for web browser integration
**Response Status Codes:**
- `200 OK`: Full video streaming (progressive chunks)
- `206 Partial Content`: Range request successful
- `404 Not Found`: Video not found or not streamable
- `416 Range Not Satisfiable`: Invalid range request
### Get Video Info
```http
GET /videos/{file_id}
```
**Example Request:**
```bash
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi
```
**Response includes complete metadata:**
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"camera_name": "camera1",
"filename": "camera1_recording_20250804_143022.avi",
"file_size_bytes": 52428800,
"format": "avi",
"status": "completed",
"created_at": "2025-08-04T14:30:22",
"start_time": "2025-08-04T14:30:22",
"end_time": "2025-08-04T14:32:22",
"machine_trigger": "vibratory_conveyor",
"is_streamable": true,
"needs_conversion": true,
"metadata": {
"duration_seconds": 120.5,
"width": 1920,
"height": 1080,
"fps": 30.0,
"codec": "XVID",
"bitrate": 5000000,
"aspect_ratio": 1.777
}
}
```
### Get Thumbnail
```http
GET /videos/{file_id}/thumbnail?timestamp=5.0&width=320&height=240
```
**Query Parameters:**
- `timestamp` (optional): Time position in seconds to extract thumbnail from (default: 1.0)
- `width` (optional): Thumbnail width in pixels (default: 320)
- `height` (optional): Thumbnail height in pixels (default: 240)
**Example Request:**
```bash
curl "http://localhost:8000/videos/camera1_recording_20250804_143022.avi/thumbnail?timestamp=5.0&width=320&height=240" \
--output thumbnail.jpg
```
**Response**: JPEG image data with caching headers
- `Content-Type: image/jpeg`
- `Cache-Control: public, max-age=3600`
### Streaming Info
```http
GET /videos/{file_id}/info
```
**Example Request:**
```bash
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi/info
```
**Response**: Technical streaming details
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"file_size_bytes": 52428800,
"content_type": "video/x-msvideo",
"supports_range_requests": true,
"chunk_size_bytes": 262144
}
```
### Video Validation
```http
POST /videos/{file_id}/validate
```
**Example Request:**
```bash
curl -X POST http://localhost:8000/videos/camera1_recording_20250804_143022.avi/validate
```
**Response**: Validation status
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"is_valid": true
}
```
### Cache Management
```http
POST /videos/{file_id}/cache/invalidate
```
**Example Request:**
```bash
curl -X POST http://localhost:8000/videos/camera1_recording_20250804_143022.avi/cache/invalidate
```
**Response**: Cache invalidation status
```json
{
"file_id": "camera1_recording_20250804_143022.avi",
"cache_invalidated": true
}
```
### Admin: Cache Cleanup
```http
POST /admin/videos/cache/cleanup?max_size_mb=100
```
**Example Request:**
```bash
curl -X POST "http://localhost:8000/admin/videos/cache/cleanup?max_size_mb=100"
```
**Response**: Cache cleanup results
```json
{
"cache_cleaned": true,
"entries_removed": 15,
"max_size_mb": 100
}
```
## 🌐 React Integration
### Basic Video Player
```jsx
function VideoPlayer({ fileId }) {
return (
<video
controls
width="100%"
preload="metadata"
style={{ maxWidth: '800px' }}
>
<source
src={`${API_BASE_URL}/videos/${fileId}/stream`}
type="video/mp4"
/>
Your browser does not support video playback.
</video>
);
}
```
### Advanced Player with Thumbnail
```jsx
function VideoPlayerWithThumbnail({ fileId }) {
const [thumbnail, setThumbnail] = useState(null);
useEffect(() => {
fetch(`${API_BASE_URL}/videos/${fileId}/thumbnail`)
.then(response => response.blob())
.then(blob => setThumbnail(URL.createObjectURL(blob)));
}, [fileId]);
return (
<video controls width="100%" poster={thumbnail}>
<source
src={`${API_BASE_URL}/videos/${fileId}/stream`}
type="video/mp4"
/>
</video>
);
}
```
### Video List Component
```jsx
function VideoList({ cameraName }) {
const [videos, setVideos] = useState([]);
useEffect(() => {
const params = new URLSearchParams();
if (cameraName) params.append('camera_name', cameraName);
params.append('include_metadata', 'true');
fetch(`${API_BASE_URL}/videos/?${params}`)
.then(response => response.json())
.then(data => setVideos(data.videos));
}, [cameraName]);
return (
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
{videos.map(video => (
<VideoCard key={video.file_id} video={video} />
))}
</div>
);
}
```
## 🔧 Configuration
The video module is automatically initialized when the API server starts. Configuration options:
```python
# In your API server initialization
video_module = create_video_module(
config=config,
storage_manager=storage_manager,
enable_caching=True, # Enable streaming cache
enable_conversion=True # Enable format conversion
)
```
### Configuration Parameters
- **`enable_caching`**: Enable/disable intelligent byte-range caching (default: True)
- **`cache_size_mb`**: Maximum cache size in MB (default: 100)
- **`cache_max_age_minutes`**: Cache entry expiration time (default: 30)
- **`enable_conversion`**: Enable/disable automatic AVI to MP4 conversion (default: True)
- **`conversion_quality`**: Video conversion quality: "low", "medium", "high" (default: "medium")
### System Requirements
- **OpenCV**: Required for thumbnail generation and metadata extraction
- **FFmpeg**: Optional, for video format conversion (graceful fallback if not available)
- **Storage**: Sufficient disk space for video files and cache
- **Memory**: Recommended 2GB+ RAM for caching and video processing
## 🔐 Authentication & Security
### Current Security Model
**⚠️ IMPORTANT: No authentication is currently implemented.**
- **Open Access**: All video streaming endpoints are publicly accessible
- **CORS Policy**: Currently set to allow all origins (`allow_origins=["*"]`)
- **Network Security**: Designed for internal network use only
- **No API Keys**: No authentication tokens or API keys required
- **No Rate Limiting**: No request rate limiting currently implemented
### Security Considerations for Production
#### For Internal Network Deployment
```bash
# Current configuration is suitable for:
# - Internal corporate networks
# - Isolated network segments
# - Development and testing environments
```
#### For External Access (Recommendations)
If you need to expose the video streaming API externally, consider implementing:
1. **Authentication Layer**
```python
# Example: Add JWT authentication
from fastapi import Depends, HTTPException
from fastapi.security import HTTPBearer
security = HTTPBearer()
async def verify_token(token: str = Depends(security)):
# Implement token verification logic
pass
```
2. **CORS Configuration**
```python
# Restrict CORS to specific domains
app.add_middleware(
CORSMiddleware,
allow_origins=["https://yourdomain.com"],
allow_credentials=True,
allow_methods=["GET", "POST"],
allow_headers=["*"]
)
```
3. **Rate Limiting**
```python
# Example: Add rate limiting
from slowapi import Limiter
limiter = Limiter(key_func=get_remote_address)
@app.get("/videos/")
@limiter.limit("10/minute")
async def list_videos():
pass
```
4. **Network Security**
- Use HTTPS/TLS for encrypted communication
- Implement firewall rules to restrict access
- Consider VPN access for remote users
- Use reverse proxy (nginx) for additional security
### Access Control Summary
```
┌─────────────────────────────────────────────────────────────┐
│ Current Access Model │
├─────────────────────────────────────────────────────────────┤
│ Authentication: ❌ None │
│ Authorization: ❌ None │
│ CORS: ✅ Enabled (all origins) │
│ Rate Limiting: ❌ None │
│ HTTPS: ⚠️ Depends on deployment │
│ Network Security: ⚠️ Firewall/VPN recommended │
└─────────────────────────────────────────────────────────────┘
```
## 📊 Performance
- **Caching**: Intelligent byte-range caching reduces disk I/O
- **Adaptive Chunking**: Optimal chunk sizes based on file size
- **Range Requests**: Only download needed portions
- **Format Conversion**: Automatic conversion to web-compatible formats
## 🛠️ Service Management
### Restart Service
```bash
sudo systemctl restart usda-vision-camera
```
### Check Status
```bash
# Check video module status
curl http://localhost:8000/system/video-module
# Check available videos
curl http://localhost:8000/videos/
```
### Logs
```bash
sudo journalctl -u usda-vision-camera -f
```
## 🧪 Testing
Run the video module tests:
```bash
cd /home/alireza/USDA-vision-cameras
PYTHONPATH=/home/alireza/USDA-vision-cameras python tests/test_video_module.py
```
## 🔍 Troubleshooting
### Video Not Playing
1. **Check if file exists**: `GET /videos/{file_id}`
```bash
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi
```
2. **Verify streaming info**: `GET /videos/{file_id}/info`
```bash
curl http://localhost:8000/videos/camera1_recording_20250804_143022.avi/info
```
3. **Test direct stream**: `GET /videos/{file_id}/stream`
```bash
curl -I http://localhost:8000/videos/camera1_recording_20250804_143022.avi/stream
```
4. **Validate video file**: `POST /videos/{file_id}/validate`
```bash
curl -X POST http://localhost:8000/videos/camera1_recording_20250804_143022.avi/validate
```
### Performance Issues
1. **Check cache status**: Clean up cache if needed
```bash
curl -X POST "http://localhost:8000/admin/videos/cache/cleanup?max_size_mb=100"
```
2. **Monitor system resources**: Check CPU, memory, and disk usage
3. **Adjust cache size**: Modify configuration parameters
4. **Invalidate specific cache**: For updated files
```bash
curl -X POST http://localhost:8000/videos/{file_id}/cache/invalidate
```
### Format Issues
- **AVI files**: Automatically converted to MP4 for web compatibility
- **Conversion requires FFmpeg**: Optional dependency with graceful fallback
- **Supported formats**: AVI (with conversion), MP4 (native), WebM (native)
### Common HTTP Status Codes
- **200**: Success - Video streamed successfully
- **206**: Partial Content - Range request successful
- **404**: Not Found - Video file doesn't exist or isn't streamable
- **416**: Range Not Satisfiable - Invalid range request
- **500**: Internal Server Error - Failed to read video data or generate thumbnail
### Browser Compatibility
- **Chrome/Chromium**: Full support for MP4 and range requests
- **Firefox**: Full support for MP4 and range requests
- **Safari**: Full support for MP4 and range requests
- **Edge**: Full support for MP4 and range requests
- **Mobile browsers**: Generally good support for MP4 streaming
### Error Scenarios and Solutions
#### Video File Issues
```bash
# Problem: Video not found (404)
curl http://localhost:8000/videos/nonexistent_video.mp4
# Response: {"detail": "Video nonexistent_video.mp4 not found"}
# Solution: Verify file_id exists using list endpoint
# Problem: Video not streamable
curl http://localhost:8000/videos/corrupted_video.avi/stream
# Response: {"detail": "Video corrupted_video.avi not found or not streamable"}
# Solution: Use validation endpoint to check file integrity
```
#### Range Request Issues
```bash
# Problem: Invalid range request (416)
curl -H "Range: bytes=999999999-" http://localhost:8000/videos/small_video.mp4/stream
# Response: {"detail": "Invalid range request: Range exceeds file size"}
# Solution: Check file size first using /info endpoint
# Problem: Malformed range header
curl -H "Range: invalid-range" http://localhost:8000/videos/video.mp4/stream
# Response: {"detail": "Invalid range request: Malformed range header"}
# Solution: Use proper range format: "bytes=start-end"
```
#### Thumbnail Generation Issues
```bash
# Problem: Thumbnail generation failed (404)
curl http://localhost:8000/videos/audio_only.mp4/thumbnail
# Response: {"detail": "Could not generate thumbnail for audio_only.mp4"}
# Solution: Verify video has visual content and is not audio-only
# Problem: Invalid timestamp
curl "http://localhost:8000/videos/short_video.mp4/thumbnail?timestamp=999"
# Response: Returns thumbnail from last available frame
# Solution: Check video duration first using metadata
```
#### System Resource Issues
```bash
# Problem: Cache full or system overloaded (500)
curl http://localhost:8000/videos/large_video.mp4/stream
# Response: {"detail": "Failed to read video data"}
# Solution: Clean cache or wait for system resources
curl -X POST "http://localhost:8000/admin/videos/cache/cleanup?max_size_mb=50"
```
### Debugging Workflow
```bash
# Step 1: Check system health
curl http://localhost:8000/health
# Step 2: Verify video exists and get info
curl http://localhost:8000/videos/your_video_id
# Step 3: Check streaming capabilities
curl http://localhost:8000/videos/your_video_id/info
# Step 4: Validate video file
curl -X POST http://localhost:8000/videos/your_video_id/validate
# Step 5: Test basic streaming
curl -I http://localhost:8000/videos/your_video_id/stream
# Step 6: Test range request
curl -I -H "Range: bytes=0-1023" http://localhost:8000/videos/your_video_id/stream
```
### Performance Monitoring
```bash
# Monitor cache usage
curl -X POST "http://localhost:8000/admin/videos/cache/cleanup?max_size_mb=100"
# Check system resources
curl http://localhost:8000/system/status
# Monitor video module status
curl http://localhost:8000/videos/ | jq '.total_count'
```
## 🎯 Next Steps
1. **Restart the usda-vision-camera service** to enable video streaming
2. **Test the endpoints** using curl or your browser
3. **Integrate with your React app** using the provided examples
4. **Monitor performance** and adjust caching as needed
The video streaming system is now ready for production use! 🚀

View File

@@ -1,302 +0,0 @@
# 🤖 Web AI Agent - Video Integration Guide
This guide provides the essential information for integrating USDA Vision Camera video streaming into your web application.
## 🎯 Quick Start
### Video Streaming Status: ✅ READY
- **Progressive streaming implemented** - Videos play in browsers (no download)
- **86 MP4 files available** - All properly indexed and streamable
- **HTTP range requests supported** - Seeking and progressive playback work
- **Memory efficient** - 8KB chunked delivery
## 🚀 API Endpoints
### Base URL
```
http://localhost:8000
```
### 1. List Available Videos
```http
GET /videos/?camera_name={camera}&limit={limit}
```
**Example:**
```bash
curl "http://localhost:8000/videos/?camera_name=camera1&limit=10"
```
**Response:**
```json
{
"videos": [
{
"file_id": "camera1_auto_blower_separator_20250805_123329.mp4",
"camera_name": "camera1",
"file_size_bytes": 1072014489,
"format": "mp4",
"status": "completed",
"is_streamable": true,
"created_at": "2025-08-05T12:43:12.631210"
}
],
"total_count": 1
}
```
### 2. Stream Video (Progressive)
```http
GET /videos/{file_id}/stream
```
**Example:**
```bash
curl "http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/stream"
```
**Features:**
- ✅ Progressive streaming (8KB chunks)
- ✅ HTTP range requests (206 Partial Content)
- ✅ Browser compatible (HTML5 video)
- ✅ Seeking support
- ✅ No authentication required
### 3. Get Video Thumbnail
```http
GET /videos/{file_id}/thumbnail?timestamp={seconds}&width={px}&height={px}
```
**Example:**
```bash
curl "http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/thumbnail?timestamp=5.0&width=320&height=240"
```
## 🌐 Web Integration
### HTML5 Video Player
```html
<video controls width="100%" preload="metadata">
<source src="http://localhost:8000/videos/{file_id}/stream" type="video/mp4">
Your browser does not support video playback.
</video>
```
### React Component
```jsx
function VideoPlayer({ fileId, width = "100%" }) {
const streamUrl = `http://localhost:8000/videos/${fileId}/stream`;
const thumbnailUrl = `http://localhost:8000/videos/${fileId}/thumbnail`;
return (
<video
controls
width={width}
preload="metadata"
poster={thumbnailUrl}
style={{ maxWidth: '800px', borderRadius: '8px' }}
>
<source src={streamUrl} type="video/mp4" />
Your browser does not support video playback.
</video>
);
}
```
### Video List Component
```jsx
function VideoList({ cameraName = null, limit = 20 }) {
const [videos, setVideos] = useState([]);
const [loading, setLoading] = useState(true);
useEffect(() => {
const params = new URLSearchParams();
if (cameraName) params.append('camera_name', cameraName);
params.append('limit', limit.toString());
fetch(`http://localhost:8000/videos/?${params}`)
.then(response => response.json())
.then(data => {
// Filter only streamable MP4 videos
const streamableVideos = data.videos.filter(
v => v.format === 'mp4' && v.is_streamable
);
setVideos(streamableVideos);
setLoading(false);
})
.catch(error => {
console.error('Error loading videos:', error);
setLoading(false);
});
}, [cameraName, limit]);
if (loading) return <div>Loading videos...</div>;
return (
<div className="video-grid">
{videos.map(video => (
<div key={video.file_id} className="video-card">
<h3>{video.file_id}</h3>
<p>Camera: {video.camera_name}</p>
<p>Size: {(video.file_size_bytes / 1024 / 1024).toFixed(1)} MB</p>
<VideoPlayer fileId={video.file_id} width="100%" />
</div>
))}
</div>
);
}
```
## 📊 Available Data
### Current Video Inventory
- **Total Videos**: 161 files
- **MP4 Files**: 86 (all streamable ✅)
- **AVI Files**: 75 (legacy format, not prioritized)
- **Cameras**: camera1, camera2
- **Date Range**: July 29 - August 5, 2025
### Video File Naming Convention
```
{camera}_{trigger}_{machine}_{YYYYMMDD}_{HHMMSS}.mp4
```
**Examples:**
- `camera1_auto_blower_separator_20250805_123329.mp4`
- `camera2_auto_vibratory_conveyor_20250805_123042.mp4`
- `20250804_161305_manual_camera1_2025-08-04T20-13-09-634Z.mp4` (note: manual recordings use a timestamp-first pattern that differs from the convention above)
### Machine Triggers
- `auto_blower_separator` - Automatic recording triggered by blower separator
- `auto_vibratory_conveyor` - Automatic recording triggered by vibratory conveyor
- `manual` - Manual recording initiated by user
## 🔧 Technical Details
### Streaming Implementation
- **Method**: FastAPI `StreamingResponse` with async generators
- **Chunk Size**: 8KB for optimal performance
- **Range Requests**: Full HTTP/1.1 range request support
- **Status Codes**: 200 (full), 206 (partial), 404 (not found)
- **CORS**: Enabled for all origins
- **Caching**: Server-side byte-range caching
### Browser Compatibility
- ✅ Chrome/Chromium
- ✅ Firefox
- ✅ Safari
- ✅ Edge
- ✅ Mobile browsers
### Performance Characteristics
- **Memory Usage**: Low (8KB chunks, no large file loading)
- **Seeking**: Instant (HTTP range requests)
- **Startup Time**: Fast (metadata preload)
- **Bandwidth**: Adaptive (only downloads viewed portions)
## 🛠️ Error Handling
### Common Scenarios
```javascript
// Check if video is streamable
const checkVideo = async (fileId) => {
try {
const response = await fetch(`http://localhost:8000/videos/${fileId}`);
const video = await response.json();
if (!video.is_streamable) {
console.warn(`Video ${fileId} is not streamable`);
return false;
}
return true;
} catch (error) {
console.error(`Error checking video ${fileId}:`, error);
return false;
}
};
// Handle video loading errors
const VideoPlayerWithErrorHandling = ({ fileId }) => {
const [error, setError] = useState(null);
const handleError = (e) => {
console.error('Video playback error:', e);
setError('Failed to load video. Please try again.');
};
if (error) {
return <div className="error">{error}</div>;
}
return (
<video
controls
onError={handleError}
src={`http://localhost:8000/videos/${fileId}/stream`}
/>
);
};
```
### HTTP Status Codes
- `200 OK` - Video streaming successfully
- `206 Partial Content` - Range request successful
- `404 Not Found` - Video not found or not streamable
- `416 Range Not Satisfiable` - Invalid range request
- `500 Internal Server Error` - Server error reading video
## 🔐 Security Notes
### Current Configuration
- **Authentication**: None (open access)
- **CORS**: Enabled for all origins
- **Network**: Designed for internal use
- **HTTPS**: Not required (HTTP works)
### For Production Use
Consider implementing:
- Authentication/authorization
- Rate limiting
- HTTPS/TLS encryption
- Network access controls
## 🧪 Testing
### Quick Test
```bash
# Test video listing
curl "http://localhost:8000/videos/?limit=5"
# Test video streaming
curl -I "http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/stream"
# Test range request
curl -H "Range: bytes=0-1023" "http://localhost:8000/videos/camera1_auto_blower_separator_20250805_123329.mp4/stream" -o test_chunk.mp4
```
### Browser Test
Open: `file:///home/alireza/USDA-vision-cameras/test_video_streaming.html`
## 📞 Support
### Service Management
```bash
# Restart video service
sudo systemctl restart usda-vision-camera
# Check service status
sudo systemctl status usda-vision-camera
# View logs
sudo journalctl -u usda-vision-camera -f
```
### Health Check
```bash
curl http://localhost:8000/health
```
---
**✅ Ready for Integration**: The video streaming system is fully operational and ready for web application integration. All MP4 files are streamable with progressive playback support.

View File

@@ -1,521 +0,0 @@
# 🎛️ Camera Configuration API Guide
This guide explains how to configure camera settings via API endpoints, including all the advanced settings from your config.json.
> **Note**: This document is part of the comprehensive [USDA Vision Camera System API Documentation](../API_DOCUMENTATION.md). For complete API reference, see the main documentation.
## 📋 Configuration Categories
### ✅ **Real-time Configurable (No Restart Required)**
These settings can be changed while the camera is active:
- **Basic**: `exposure_ms`, `gain`, `target_fps`
- **Image Quality**: `sharpness`, `contrast`, `saturation`, `gamma`
- **Color**: `auto_white_balance`, `color_temperature_preset`
- **White Balance**: `wb_red_gain`, `wb_green_gain`, `wb_blue_gain`
- **Advanced**: `anti_flicker_enabled`, `light_frequency`
- **HDR**: `hdr_enabled`, `hdr_gain_mode`
### ⚠️ **Restart Required**
These settings require camera restart to take effect:
- **Noise Reduction**: `noise_filter_enabled`, `denoise_3d_enabled`
- **Video Recording**: `video_format`, `video_codec`, `video_quality`
- **System**: `machine_topic`, `storage_path`, `enabled`, `bit_depth`
### 🔒 **Read-Only Fields**
These fields are returned in the response but cannot be modified via the API:
- **System Info**: `name`, `machine_topic`, `storage_path`, `enabled`
- **Auto-Recording**: `auto_start_recording_enabled`, `auto_recording_max_retries`, `auto_recording_retry_delay_seconds`
## 🔌 API Endpoints
### 1. Get Camera Configuration
```http
GET /cameras/{camera_name}/config
```
**Response:**
```json
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 0.3,
"gain": 4.0,
"target_fps": 0,
"enabled": true,
"video_format": "mp4",
"video_codec": "mp4v",
"video_quality": 95,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"sharpness": 100,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 0,
"wb_red_gain": 0.94,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.87,
"anti_flicker_enabled": false,
"light_frequency": 0,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 2
}
```
### 2. Update Camera Configuration
```http
PUT /cameras/{camera_name}/config
Content-Type: application/json
```
**Request Body (all fields optional):**
```json
{
"exposure_ms": 2.0,
"gain": 4.0,
"target_fps": 10.0,
"sharpness": 150,
"contrast": 120,
"saturation": 110,
"gamma": 90,
"noise_filter_enabled": true,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 1,
"wb_red_gain": 1.2,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.8,
"anti_flicker_enabled": true,
"light_frequency": 1,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
```
**Response:**
```json
{
"success": true,
"message": "Camera camera1 configuration updated",
"updated_settings": ["exposure_ms", "gain", "sharpness", "wb_red_gain"]
}
```
### 3. Apply Configuration (Restart Camera)
```http
POST /cameras/{camera_name}/apply-config
```
**Response:**
```json
{
"success": true,
"message": "Configuration applied to camera camera1"
}
```
## 📊 Setting Ranges and Descriptions
### System Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `name` | string | - | Camera identifier (read-only) |
| `machine_topic` | string | - | MQTT topic for machine state (read-only) |
| `storage_path` | string | - | Video storage directory (read-only) |
| `enabled` | true/false | true | Camera enabled status (read-only) |
### Auto-Recording Settings
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `auto_start_recording_enabled` | true/false | true | Enable automatic recording on machine state changes (read-only) |
| `auto_recording_max_retries` | 1-10 | 3 | Maximum retry attempts for failed recordings (read-only) |
| `auto_recording_retry_delay_seconds` | 1-30 | 2 | Delay between retry attempts in seconds (read-only) |
### Basic Settings
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `exposure_ms` | 0.1 - 1000.0 | 1.0 | Exposure time in milliseconds |
| `gain` | 0.0 - 20.0 | 3.5 | Camera gain multiplier |
| `target_fps` | 0.0 - 120.0 | 0 | Target FPS (0 = maximum) |
### Image Quality Settings
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `sharpness` | 0 - 200 | 100 | Image sharpness (100 = no sharpening) |
| `contrast` | 0 - 200 | 100 | Image contrast (100 = normal) |
| `saturation` | 0 - 200 | 100 | Color saturation (color cameras only) |
| `gamma` | 0 - 300 | 100 | Gamma correction (100 = normal) |
### Color Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `auto_white_balance` | true/false | true | Automatic white balance |
| `color_temperature_preset` | 0-10 | 0 | Color temperature preset (0=auto) |
### Manual White Balance RGB Gains
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `wb_red_gain` | 0.0 - 3.99 | 1.0 | Red channel gain for manual white balance |
| `wb_green_gain` | 0.0 - 3.99 | 1.0 | Green channel gain for manual white balance |
| `wb_blue_gain` | 0.0 - 3.99 | 1.0 | Blue channel gain for manual white balance |
### Advanced Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `anti_flicker_enabled` | true/false | true | Reduce artificial lighting flicker |
| `light_frequency` | 0/1 | 1 | Light frequency (0=50Hz, 1=60Hz) |
| `noise_filter_enabled` | true/false | true | Basic noise filtering |
| `denoise_3d_enabled` | true/false | false | Advanced 3D denoising |
### HDR Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `hdr_enabled` | true/false | false | High Dynamic Range |
| `hdr_gain_mode` | 0-3 | 0 | HDR processing mode |
## 🚀 Usage Examples
### Example 1: Adjust Exposure and Gain
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"exposure_ms": 1.5,
"gain": 4.0
}'
```
### Example 2: Improve Image Quality
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"sharpness": 150,
"contrast": 120,
"gamma": 90
}'
```
### Example 3: Configure for Indoor Lighting
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"anti_flicker_enabled": true,
"light_frequency": 1,
"auto_white_balance": false,
"color_temperature_preset": 2
}'
```
### Example 4: Enable HDR Mode
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"hdr_enabled": true,
"hdr_gain_mode": 1
}'
```
## ⚛️ React Integration Examples
### Camera Configuration Component
```jsx
import React, { useState, useEffect } from 'react';
const CameraConfig = ({ cameraName, apiBaseUrl = 'http://localhost:8000' }) => {
const [config, setConfig] = useState(null);
const [loading, setLoading] = useState(false);
const [error, setError] = useState(null);
// Load current configuration
useEffect(() => {
fetchConfig();
}, [cameraName]);
const fetchConfig = async () => {
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`);
if (response.ok) {
const data = await response.json();
setConfig(data);
} else {
setError('Failed to load configuration');
}
} catch (err) {
setError(`Error: ${err.message}`);
}
};
const updateConfig = async (updates) => {
setLoading(true);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates)
});
if (response.ok) {
const result = await response.json();
console.log('Updated settings:', result.updated_settings);
await fetchConfig(); // Reload configuration
} else {
const error = await response.json();
setError(error.detail || 'Update failed');
}
} catch (err) {
setError(`Error: ${err.message}`);
} finally {
setLoading(false);
}
};
const handleSliderChange = (setting, value) => {
updateConfig({ [setting]: value });
};
if (!config) return <div>Loading configuration...</div>;
return (
<div className="camera-config">
<h3>Camera Configuration: {cameraName}</h3>
{/* System Information (Read-Only) */}
<div className="config-section">
<h4>System Information</h4>
<div className="info-grid">
<div><strong>Name:</strong> {config.name}</div>
<div><strong>Machine Topic:</strong> {config.machine_topic}</div>
<div><strong>Storage Path:</strong> {config.storage_path}</div>
<div><strong>Enabled:</strong> {config.enabled ? 'Yes' : 'No'}</div>
<div><strong>Auto Recording:</strong> {config.auto_start_recording_enabled ? 'Enabled' : 'Disabled'}</div>
<div><strong>Max Retries:</strong> {config.auto_recording_max_retries}</div>
<div><strong>Retry Delay:</strong> {config.auto_recording_retry_delay_seconds}s</div>
</div>
</div>
{/* Basic Settings */}
<div className="config-section">
<h4>Basic Settings</h4>
<div className="setting">
<label>Exposure (ms): {config.exposure_ms}</label>
<input
type="range"
min="0.1"
max="10"
step="0.1"
value={config.exposure_ms}
onChange={(e) => handleSliderChange('exposure_ms', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Gain: {config.gain}</label>
<input
type="range"
min="0"
max="10"
step="0.1"
value={config.gain}
onChange={(e) => handleSliderChange('gain', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Target FPS: {config.target_fps}</label>
<input
type="range"
min="0"
max="30"
step="1"
value={config.target_fps}
onChange={(e) => handleSliderChange('target_fps', parseInt(e.target.value))}
/>
</div>
</div>
{/* Image Quality Settings */}
<div className="config-section">
<h4>Image Quality</h4>
<div className="setting">
<label>Sharpness: {config.sharpness}</label>
<input
type="range"
min="0"
max="200"
value={config.sharpness}
onChange={(e) => handleSliderChange('sharpness', parseInt(e.target.value))}
/>
</div>
<div className="setting">
<label>Contrast: {config.contrast}</label>
<input
type="range"
min="0"
max="200"
value={config.contrast}
onChange={(e) => handleSliderChange('contrast', parseInt(e.target.value))}
/>
</div>
<div className="setting">
<label>Gamma: {config.gamma}</label>
<input
type="range"
min="0"
max="300"
value={config.gamma}
onChange={(e) => handleSliderChange('gamma', parseInt(e.target.value))}
/>
</div>
</div>
{/* White Balance RGB Gains */}
<div className="config-section">
<h4>White Balance RGB Gains</h4>
<div className="setting">
<label>Red Gain: {config.wb_red_gain}</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_red_gain}
onChange={(e) => handleSliderChange('wb_red_gain', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Green Gain: {config.wb_green_gain}</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_green_gain}
onChange={(e) => handleSliderChange('wb_green_gain', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Blue Gain: {config.wb_blue_gain}</label>
<input
type="range"
min="0"
max="3.99"
step="0.01"
value={config.wb_blue_gain}
onChange={(e) => handleSliderChange('wb_blue_gain', parseFloat(e.target.value))}
/>
</div>
</div>
{/* Advanced Settings */}
<div className="config-section">
<h4>Advanced Settings</h4>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.anti_flicker_enabled}
onChange={(e) => updateConfig({ anti_flicker_enabled: e.target.checked })}
/>
Anti-flicker Enabled
</label>
</div>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.auto_white_balance}
onChange={(e) => updateConfig({ auto_white_balance: e.target.checked })}
/>
Auto White Balance
</label>
</div>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.hdr_enabled}
onChange={(e) => updateConfig({ hdr_enabled: e.target.checked })}
/>
HDR Enabled
</label>
</div>
</div>
{error && (
<div className="error" style={{ color: 'red', marginTop: '10px' }}>
{error}
</div>
)}
{loading && <div>Updating configuration...</div>}
</div>
);
};
export default CameraConfig;
```
## 🔄 Configuration Workflow
### 1. Real-time Adjustments
For settings that don't require restart:
```bash
# Update settings
curl -X PUT /cameras/camera1/config -d '{"exposure_ms": 2.0}'
# Settings take effect immediately
# Continue recording/streaming without interruption
```
### 2. Settings Requiring Restart
For noise reduction and system settings:
```bash
# Update settings
curl -X PUT /cameras/camera1/config -d '{"noise_filter_enabled": false}'
# Apply configuration (restarts camera)
curl -X POST /cameras/camera1/apply-config
# Camera reinitializes with new settings
```
## 🚨 Important Notes
### Camera State During Updates
- **Real-time settings**: Applied immediately, no interruption
- **Restart-required settings**: Saved to config, applied on next restart
- **Recording**: Continues during real-time updates
- **Streaming**: Continues during real-time updates
### Error Handling
- Invalid ranges return HTTP 422 with validation errors
- Camera not found returns HTTP 404
- SDK errors are logged and return HTTP 500
### Performance Impact
- **Image quality settings**: Minimal performance impact
- **Noise reduction**: May reduce FPS when enabled
- **HDR**: Significant processing overhead when enabled
This comprehensive API allows you to control all camera settings programmatically, making it perfect for integration with React dashboards or automated optimization systems!

View File

@@ -1,127 +0,0 @@
# Blower Camera (Camera1) Configuration
This document describes the default configuration for the blower camera (Camera1) based on the GigE camera settings from the dedicated software.
## Camera Identification
- **Camera Name**: camera1 (Blower-Yield-Cam)
- **Machine Topic**: blower_separator
- **Purpose**: Monitors the blower separator machine
## Configuration Summary
Based on the camera settings screenshots, the following configuration has been applied to Camera1:
### Exposure Settings
- **Mode**: Manual (not Auto)
- **Exposure Time**: 1.0ms (1000μs)
- **Gain**: 3.5x (350 in camera units)
- **Anti-Flicker**: Enabled (screenshot labels this 50Hz, but the config's `light_frequency` of 1 maps to 60Hz — see the Advanced Settings section; confirm which applies)
### Color Processing Settings
- **White Balance Mode**: Manual (not Auto)
- **Color Temperature**: D65 (6500K)
- **RGB Gain Values**:
- Red Gain: 1.00
- Green Gain: 1.00
- Blue Gain: 1.00
- **Saturation**: 100 (normal)
### LUT (Look-Up Table) Settings
- **Mode**: Dynamically generated (not Preset or Custom)
- **Gamma**: 1.00 (100 in config units)
- **Contrast**: 100 (normal)
### Advanced Settings
- **Anti-Flicker**: Enabled
- **Light Frequency**: 60Hz (1 in config)
- **Bit Depth**: 8-bit
- **HDR**: Disabled
## Configuration Mapping
The screenshots show these key settings that have been mapped to the config.json:
| Screenshot Setting | Config Parameter | Value | Notes |
|-------------------|------------------|-------|-------|
| Manual Exposure | auto_exposure | false | Exposure mode set to manual |
| Time(ms): 1.0000 | exposure_ms | 1.0 | Exposure time in milliseconds |
| Gain(multiple): 3.500 | gain | 3.5 | Analog gain multiplier |
| Manual White Balance | auto_white_balance | false | Manual WB mode |
| Color Temperature: D65 | color_temperature_preset | 6500 | D65 = 6500K |
| Red Gain: 1.00 | wb_red_gain | 1.0 | Manual RGB gain |
| Green Gain: 1.00 | wb_green_gain | 1.0 | Manual RGB gain |
| Blue Gain: 1.00 | wb_blue_gain | 1.0 | Manual RGB gain |
| Saturation: 100 | saturation | 100 | Color saturation |
| Gamma: 1.00 | gamma | 100 | Gamma correction |
| Contrast: 100 | contrast | 100 | Image contrast |
| 50HZ Anti-Flicker | anti_flicker_enabled | true | Flicker reduction |
| 60Hz frequency | light_frequency | 1 | Power frequency |
## Current Configuration
The current config.json for camera1 includes:
```json
{
"name": "camera1",
"machine_topic": "blower_separator",
"storage_path": "/storage/camera1",
"exposure_ms": 1.0,
"gain": 3.5,
"target_fps": 0,
"enabled": true,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"sharpness": 100,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 6500,
"anti_flicker_enabled": true,
"light_frequency": 1,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
```
## Camera Preview Enhancement
**Important Update**: The camera preview/streaming functionality has been enhanced to apply all default configuration settings from config.json, ensuring that preview images match the quality and appearance of recorded videos.
### What This Means for Camera1
When you view the camera preview, you'll now see:
- **Manual exposure** (1.0ms) and **high gain** (3.5x) applied
- **50Hz anti-flicker** filtering active
- **Manual white balance** with balanced RGB gains (1.0, 1.0, 1.0)
- **Standard image processing** (sharpness: 100, contrast: 100, gamma: 100, saturation: 100)
- **D65 color temperature** (6500K) applied
This ensures the preview accurately represents what will be recorded.
## Notes
1. **Machine Topic Correction**: The machine topic has been corrected from "vibratory_conveyor" to "blower_separator" to match the camera's actual monitoring purpose.
2. **Manual White Balance**: The camera is configured for manual white balance with D65 color temperature, which is appropriate for daylight conditions.
3. **RGB Gain Support**: The current configuration system needs to be extended to support individual RGB gain values for manual white balance fine-tuning.
4. **Anti-Flicker**: Enabled to reduce artificial lighting interference, set to 60Hz to match North American power frequency.
5. **LUT Mode**: The camera uses dynamically generated LUT with gamma=1.00 and contrast=100, which provides linear response.
## Future Enhancements
To fully support all settings shown in the screenshots, the following parameters should be added to the configuration system:
- `wb_red_gain`: Red channel gain for manual white balance (0.0-3.99)
- `wb_green_gain`: Green channel gain for manual white balance (0.0-3.99)
- `wb_blue_gain`: Blue channel gain for manual white balance (0.0-3.99)
- `lut_mode`: LUT generation mode (0=dynamic, 1=preset, 2=custom)
- `lut_preset`: Preset LUT selection when using preset mode

View File

@@ -1,150 +0,0 @@
# Conveyor Camera (Camera2) Configuration
This document describes the default configuration for the conveyor camera (Camera2) based on the GigE camera settings from the dedicated software.
## Camera Identification
- **Camera Name**: camera2 (Cracker-Cam)
- **Machine Topic**: vibratory_conveyor
- **Purpose**: Monitors the vibratory conveyor/cracker machine
## Configuration Summary
Based on the camera settings screenshots, the following configuration has been applied to Camera2:
### Color Processing Settings
- **White Balance Mode**: Manual (not Auto)
- **Color Temperature**: D65 (6500K)
- **RGB Gain Values**:
- Red Gain: 1.01
- Green Gain: 1.00
- Blue Gain: 0.87
- **Saturation**: 100 (normal)
### LUT (Look-Up Table) Settings
- **Mode**: Dynamically generated (not Preset or Custom)
- **Gamma**: 1.00 (100 in config units)
- **Contrast**: 100 (normal)
### Graphic Processing Settings
- **Sharpness Level**: 0 (no sharpening applied)
- **Noise Reduction**:
- Denoise2D: Disabled
- Denoise3D: Disabled
- **Rotation**: Disabled
- **Lens Distortion Correction**: Disabled
- **Dead Pixel Correction**: Enabled
- **Flat Fielding Correction**: Disabled
## Configuration Mapping
The screenshots show these key settings that have been mapped to the config.json:
| Screenshot Setting | Config Parameter | Value | Notes |
|-------------------|------------------|-------|-------|
| Manual White Balance | auto_white_balance | false | Manual WB mode |
| Color Temperature: D65 | color_temperature_preset | 6500 | D65 = 6500K |
| Red Gain: 1.01 | wb_red_gain | 1.01 | Manual RGB gain |
| Green Gain: 1.00 | wb_green_gain | 1.0 | Manual RGB gain |
| Blue Gain: 0.87 | wb_blue_gain | 0.87 | Manual RGB gain |
| Saturation: 100 | saturation | 100 | Color saturation |
| Gamma: 1.00 | gamma | 100 | Gamma correction |
| Contrast: 100 | contrast | 100 | Image contrast |
| Sharpen Level: 0 | sharpness | 0 | No sharpening |
| Denoise2D: Disabled | noise_filter_enabled | false | Basic noise filter off |
| Denoise3D: Disabled | denoise_3d_enabled | false | Advanced denoising off |
## Current Configuration
The current config.json for camera2 includes:
```json
{
"name": "camera2",
"machine_topic": "vibratory_conveyor",
"storage_path": "/storage/camera2",
"exposure_ms": 0.5,
"gain": 0.3,
"target_fps": 0,
"enabled": true,
"auto_start_recording_enabled": true,
"auto_recording_max_retries": 3,
"auto_recording_retry_delay_seconds": 2,
"sharpness": 0,
"contrast": 100,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": false,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 6500,
"wb_red_gain": 1.01,
"wb_green_gain": 1.0,
"wb_blue_gain": 0.87,
"anti_flicker_enabled": false,
"light_frequency": 1,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
```
## Key Differences from Camera1 (Blower Camera)
1. **RGB Gain Tuning**: Camera2 has custom RGB gains (R:1.01, G:1.00, B:0.87) vs Camera1's balanced gains (all 1.0)
2. **Sharpness**: Camera2 has sharpness disabled (0) vs Camera1's normal sharpness (100)
3. **Exposure/Gain**: Camera2 uses lower exposure (0.5ms) and gain (0.3x) vs Camera1's higher values (1.0ms, 3.5x)
4. **Anti-Flicker**: Camera2 has anti-flicker disabled vs Camera1's enabled anti-flicker
## Notes
1. **Custom White Balance**: Camera2 uses manual white balance with custom RGB gains, suggesting specific lighting conditions or color correction requirements for the conveyor monitoring.
2. **No Sharpening**: Sharpness is set to 0, indicating the raw image quality is preferred without artificial enhancement.
3. **Minimal Noise Reduction**: Both 2D and 3D denoising are disabled, prioritizing image authenticity over noise reduction.
4. **Dead Pixel Correction**: Enabled to handle any defective pixels on the sensor.
5. **Lower Sensitivity**: The lower exposure and gain settings suggest better lighting conditions or different monitoring requirements compared to the blower camera.
## Camera Preview Enhancement
**Important Update**: The camera preview/streaming functionality has been enhanced to apply all default configuration settings from config.json, ensuring that preview images match the quality and appearance of recorded videos.
### What Changed
Previously, camera preview only applied basic settings (exposure, gain, trigger mode). Now, the preview applies the complete configuration including:
- **Image Quality**: Sharpness, contrast, gamma, saturation
- **Color Processing**: White balance mode, color temperature, RGB gains
- **Advanced Settings**: Anti-flicker, light frequency, HDR settings
- **Noise Reduction**: Filter and 3D denoising settings (where supported)
### Benefits
1. **WYSIWYG Preview**: What you see in the preview is exactly what gets recorded
2. **Accurate Color Representation**: Manual white balance and RGB gains are applied to preview
3. **Consistent Image Quality**: Sharpness, contrast, and gamma settings match recording
4. **Proper Exposure**: Anti-flicker and lighting frequency settings are applied
### Technical Implementation
The `CameraStreamer` class now includes the same comprehensive configuration methods as `CameraRecorder`:
- `_configure_image_quality()`: Applies sharpness, contrast, gamma, saturation
- `_configure_color_settings()`: Applies white balance mode, color temperature, RGB gains
- `_configure_advanced_settings()`: Applies anti-flicker, light frequency, HDR
- `_configure_noise_reduction()`: Applies noise filter settings
These methods are called during camera initialization for streaming, ensuring all config.json settings are applied.
## Future Enhancements
Additional parameters that could be added to support all graphic processing features:
- `rotation_angle`: Image rotation (0, 90, 180, 270 degrees)
- `lens_distortion_correction`: Enable/disable lens distortion correction
- `dead_pixel_correction`: Enable/disable dead pixel correction
- `flat_fielding_correction`: Enable/disable flat fielding correction
- `mirror_horizontal`: Horizontal mirroring
- `mirror_vertical`: Vertical mirroring

View File

@@ -1,159 +0,0 @@
# Camera Preview Enhancement
## Overview
The camera preview/streaming functionality has been significantly enhanced to apply all default configuration settings from `config.json`, ensuring that preview images accurately represent what will be recorded.
## Problem Solved
Previously, camera preview only applied basic settings (exposure, gain, trigger mode, frame rate), while recording applied the full configuration. This meant:
- Preview images looked different from recorded videos
- Color balance, sharpness, and other image quality settings were not visible in preview
- Users couldn't accurately assess the final recording quality from the preview
## Solution Implemented
The `CameraStreamer` class has been enhanced with comprehensive configuration methods that mirror those in `CameraRecorder`:
### New Configuration Methods Added
1. **`_configure_image_quality()`**
- Applies sharpness settings (0-200)
- Applies contrast settings (0-200)
- Applies gamma correction (0-300)
- Applies saturation for color cameras (0-200)
2. **`_configure_color_settings()`**
- Sets white balance mode (auto/manual)
- Applies color temperature presets
- Sets manual RGB gains for precise color tuning
3. **`_configure_advanced_settings()`**
- Enables/disables anti-flicker filtering
- Sets light frequency (50Hz/60Hz)
- Configures HDR settings when available
4. **`_configure_noise_reduction()`**
- Configures noise filter settings
- Configures 3D denoising settings
### Enhanced Main Configuration Method
The `_configure_streaming_settings()` method now calls all configuration methods:
```python
def _configure_streaming_settings(self):
"""Configure camera settings from config.json for streaming"""
try:
# Basic settings (existing)
mvsdk.CameraSetTriggerMode(self.hCamera, 0)
mvsdk.CameraSetAeState(self.hCamera, 0)
exposure_us = int(self.camera_config.exposure_ms * 1000)
mvsdk.CameraSetExposureTime(self.hCamera, exposure_us)
gain_value = int(self.camera_config.gain * 100)
mvsdk.CameraSetAnalogGain(self.hCamera, gain_value)
# Comprehensive configuration (new)
self._configure_image_quality()
self._configure_noise_reduction()
if not self.monoCamera:
self._configure_color_settings()
self._configure_advanced_settings()
except Exception as e:
self.logger.warning(f"Could not configure some streaming settings: {e}")
```
## Benefits
### 1. WYSIWYG Preview
- **What You See Is What You Get**: Preview now accurately represents final recording quality
- **Real-time Assessment**: Users can evaluate recording quality before starting actual recording
- **Consistent Experience**: No surprises when comparing preview to recorded footage
### 2. Accurate Color Representation
- **Manual White Balance**: RGB gains are applied to preview for accurate color reproduction
- **Color Temperature**: D65 or other presets are applied consistently
- **Saturation**: Color intensity matches recording settings
### 3. Proper Image Quality
- **Sharpness**: Edge enhancement settings are visible in preview
- **Contrast**: Dynamic range adjustments are applied
- **Gamma**: Brightness curve corrections are active
### 4. Environmental Adaptation
- **Anti-Flicker**: Artificial lighting interference is filtered in preview
- **Light Frequency**: 50Hz/60Hz settings match local power grid
- **HDR**: High dynamic range processing when enabled
## Camera-Specific Impact
### Camera1 (Blower Separator)
Preview now shows:
- Manual exposure (1.0ms) and high gain (3.5x)
- 50Hz anti-flicker filtering
- Manual white balance with balanced RGB gains (1.0, 1.0, 1.0)
- Standard image processing (sharpness: 100, contrast: 100, gamma: 100, saturation: 100)
- D65 color temperature (6500K)
### Camera2 (Conveyor/Cracker)
Preview now shows:
- Manual exposure (0.5ms) and lower gain (0.3x)
- Custom RGB color tuning (R:1.01, G:1.00, B:0.87)
- No image sharpening (sharpness: 0)
- Enhanced saturation (100) and proper gamma (100)
- D65 color temperature with manual white balance
## Technical Implementation Details
### Error Handling
- All configuration methods include try-catch blocks
- Warnings are logged for unsupported features
- Graceful degradation when SDK functions are unavailable
- Streaming continues even if some settings fail to apply
### SDK Compatibility
- Checks for function availability before calling
- Handles different SDK versions gracefully
- Logs informational messages for unavailable features
### Performance Considerations
- Configuration is applied once during camera initialization
- No performance impact on streaming frame rate
- Separate camera instance for streaming (doesn't interfere with recording)
## Usage
No changes required for users - the enhancement is automatic:
1. **Start Preview**: Use existing preview endpoints
2. **View Stream**: Camera automatically applies all config.json settings
3. **Compare**: Preview now matches recording quality exactly
### API Endpoints (unchanged)
- `GET /cameras/{camera_name}/stream` - Get live MJPEG stream
- `POST /cameras/{camera_name}/start-stream` - Start streaming
- `POST /cameras/{camera_name}/stop-stream` - Stop streaming
## Future Enhancements
Additional settings that could be added to further improve preview accuracy:
1. **Geometric Corrections**
- Lens distortion correction
- Dead pixel correction
- Flat fielding correction
2. **Image Transformations**
- Rotation (90°, 180°, 270°)
- Horizontal/vertical mirroring
3. **Advanced Processing**
- Custom LUT (Look-Up Table) support
- Advanced noise reduction algorithms
- Real-time image enhancement filters
## Conclusion
This enhancement significantly improves the user experience by providing accurate, real-time preview of camera output with all configuration settings applied. Users can now confidently assess recording quality, adjust settings, and ensure optimal camera performance before starting critical recordings.

View File

@@ -1,262 +0,0 @@
# Auto-Recording Feature Implementation Guide
## 🎯 Overview for React App Development
This document provides a comprehensive guide for updating the React application to support the new auto-recording feature that was added to the USDA Vision Camera System.
> **📚 For complete API reference**: See the [USDA Vision Camera System API Documentation](../API_DOCUMENTATION.md) for detailed endpoint specifications and examples.
## 📋 What Changed in the Backend
### New API Endpoints Added
1. **Enable Auto-Recording**
```http
POST /cameras/{camera_name}/auto-recording/enable
Response: AutoRecordingConfigResponse
```
2. **Disable Auto-Recording**
```http
POST /cameras/{camera_name}/auto-recording/disable
Response: AutoRecordingConfigResponse
```
3. **Get Auto-Recording Status**
```http
GET /auto-recording/status
Response: AutoRecordingStatusResponse
```
### Updated API Responses
#### CameraStatusResponse (Updated)
```typescript
interface CameraStatusResponse {
name: string;
status: string;
is_recording: boolean;
last_checked: string;
last_error?: string;
device_info?: any;
current_recording_file?: string;
recording_start_time?: string;
// NEW AUTO-RECORDING FIELDS
auto_recording_enabled: boolean;
auto_recording_active: boolean;
auto_recording_failure_count: number;
auto_recording_last_attempt?: string;
auto_recording_last_error?: string;
}
```
#### CameraConfigResponse (Updated)
```typescript
interface CameraConfigResponse {
name: string;
machine_topic: string;
storage_path: string;
enabled: boolean;
// NEW AUTO-RECORDING CONFIG FIELDS
auto_start_recording_enabled: boolean;
auto_recording_max_retries: number;
auto_recording_retry_delay_seconds: number;
// ... existing fields (exposure_ms, gain, etc.)
}
```
#### New Response Types
```typescript
interface AutoRecordingConfigResponse {
success: boolean;
message: string;
camera_name: string;
enabled: boolean;
}
interface AutoRecordingStatusResponse {
running: boolean;
auto_recording_enabled: boolean;
retry_queue: Record<string, any>;
enabled_cameras: string[];
}
```
## 🎨 React App UI Requirements
### 1. Camera Status Display Updates
**Add to Camera Cards/Components:**
- Auto-recording enabled/disabled indicator
- Auto-recording active status (when machine is ON and auto-recording)
- Failure count display (if > 0)
- Last auto-recording error (if any)
- Visual distinction between manual and auto-recording
**Example UI Elements:**
```jsx
// Auto-recording status badge
{camera.auto_recording_enabled && (
<Badge variant={camera.auto_recording_active ? "success" : "secondary"}>
Auto-Recording {camera.auto_recording_active ? "Active" : "Enabled"}
</Badge>
)}
// Failure indicator
{camera.auto_recording_failure_count > 0 && (
<Alert variant="warning">
Auto-recording failures: {camera.auto_recording_failure_count}
</Alert>
)}
```
### 2. Auto-Recording Controls
**Add Toggle Controls:**
- Enable/Disable auto-recording per camera
- Global auto-recording status display
- Retry queue monitoring
**Example Control Component:**
```jsx
const AutoRecordingToggle = ({ camera, onToggle }) => {
const handleToggle = async () => {
const endpoint = camera.auto_recording_enabled ? 'disable' : 'enable';
await fetch(`/cameras/${camera.name}/auto-recording/${endpoint}`, {
method: 'POST'
});
onToggle();
};
return (
<Switch
checked={camera.auto_recording_enabled}
onChange={handleToggle}
label="Auto-Recording"
/>
);
};
```
### 3. Machine State Integration
**Display Machine Status:**
- Show which machine each camera monitors
- Display current machine state (ON/OFF)
- Show correlation between machine state and recording status
**Camera-Machine Mapping:**
- Camera 1 → Vibratory Conveyor (conveyor/cracker cam)
- Camera 2 → Blower Separator (blower separator)
### 4. Auto-Recording Dashboard
**Create New Dashboard Section:**
- Overall auto-recording system status
- List of cameras with auto-recording enabled
- Active retry queue display
- Recent auto-recording events/logs
## 🔧 Implementation Steps for React App
### Step 1: Update TypeScript Interfaces
```typescript
// Update existing interfaces in your types file
// Add new interfaces for auto-recording responses
```
### Step 2: Update API Service Functions
```typescript
// Add new API calls
export const enableAutoRecording = (cameraName: string) =>
fetch(`/cameras/${cameraName}/auto-recording/enable`, { method: 'POST' });
export const disableAutoRecording = (cameraName: string) =>
fetch(`/cameras/${cameraName}/auto-recording/disable`, { method: 'POST' });
export const getAutoRecordingStatus = () =>
fetch('/auto-recording/status').then(res => res.json());
```
### Step 3: Update Camera Components
- Add auto-recording status indicators
- Add enable/disable controls
- Update recording status display to distinguish auto vs manual
### Step 4: Create Auto-Recording Management Panel
- System-wide auto-recording status
- Per-camera auto-recording controls
- Retry queue monitoring
- Error reporting and alerts
### Step 5: Update State Management
```typescript
// Add auto-recording state to your store/context
interface AppState {
cameras: CameraStatusResponse[];
autoRecordingStatus: AutoRecordingStatusResponse;
// ... existing state
}
```
## 🎯 Key User Experience Considerations
### Visual Indicators
1. **Recording Status Hierarchy:**
- Manual Recording (highest priority - red/prominent)
- Auto-Recording Active (green/secondary)
- Auto-Recording Enabled but Inactive (blue/subtle)
- Auto-Recording Disabled (gray/muted)
2. **Machine State Correlation:**
- Show machine ON/OFF status next to camera
- Indicate when auto-recording should be active
- Alert if machine is ON but auto-recording failed
3. **Error Handling:**
- Clear error messages for auto-recording failures
- Retry count display
- Last attempt timestamp
- Quick retry/reset options
### User Controls
1. **Quick Actions:**
- Toggle auto-recording per camera
- Force retry failed auto-recording
- Override auto-recording (manual control)
2. **Configuration:**
- Adjust retry settings
- Change machine-camera mappings
- Set recording parameters for auto-recording
## 🚨 Important Notes
### Behavior Rules
1. **Manual Override:** Manual recording always takes precedence over auto-recording
2. **Non-Blocking:** Auto-recording status checks don't interfere with camera operation
3. **Machine Correlation:** Auto-recording only activates when the associated machine turns ON
4. **Failure Handling:** Failed auto-recording attempts are retried automatically with exponential backoff
### API Polling Recommendations
- Poll camera status every 2-3 seconds for real-time updates
- Poll auto-recording status every 5-10 seconds
- Use WebSocket connections if available for real-time machine state updates
## 📱 Mobile Considerations
- Auto-recording controls should be easily accessible on mobile
- Status indicators should be clear and readable on small screens
- Consider collapsible sections for detailed auto-recording information
## 🔍 Testing Checklist
- [ ] Auto-recording toggle works for each camera
- [ ] Status updates reflect machine state changes
- [ ] Error states are clearly displayed
- [ ] Manual recording overrides auto-recording
- [ ] Retry mechanism is visible to users
- [ ] Mobile interface is functional
This guide provides everything needed to update the React app to fully support the new auto-recording feature!

View File

@@ -1,158 +0,0 @@
# Camera Recovery and Diagnostics Guide
This guide explains the new camera recovery functionality implemented in the USDA Vision Camera System API.
## Overview
The system now includes comprehensive camera recovery capabilities to handle connection issues, initialization failures, and other camera-related problems. These features use the underlying mvsdk (python demo) library functions to perform various recovery operations.
## Available Recovery Operations
### 1. Connection Test (`/cameras/{camera_name}/test-connection`)
- **Purpose**: Test if the camera connection is working
- **SDK Function**: `CameraConnectTest()`
- **Use Case**: Diagnose connection issues
- **HTTP Method**: POST
- **Response**: `CameraTestResponse`
### 2. Reconnect (`/cameras/{camera_name}/reconnect`)
- **Purpose**: Soft reconnection to the camera
- **SDK Function**: `CameraReConnect()`
- **Use Case**: Most common fix for connection issues
- **HTTP Method**: POST
- **Response**: `CameraRecoveryResponse`
### 3. Restart Grab (`/cameras/{camera_name}/restart-grab`)
- **Purpose**: Restart the camera grab process
- **SDK Function**: `CameraRestartGrab()`
- **Use Case**: Fix issues with image capture
- **HTTP Method**: POST
- **Response**: `CameraRecoveryResponse`
### 4. Reset Timestamp (`/cameras/{camera_name}/reset-timestamp`)
- **Purpose**: Reset camera timestamp
- **SDK Function**: `CameraRstTimeStamp()`
- **Use Case**: Fix timing-related issues
- **HTTP Method**: POST
- **Response**: `CameraRecoveryResponse`
### 5. Full Reset (`/cameras/{camera_name}/full-reset`)
- **Purpose**: Complete camera reset (uninitialize and reinitialize)
- **SDK Functions**: `CameraUnInit()` + `CameraInit()`
- **Use Case**: Hard reset for persistent issues
- **HTTP Method**: POST
- **Response**: `CameraRecoveryResponse`
### 6. Reinitialize (`/cameras/{camera_name}/reinitialize`)
- **Purpose**: Reinitialize cameras that failed initial setup
- **SDK Functions**: Complete recorder recreation
- **Use Case**: Cameras that never initialized properly
- **HTTP Method**: POST
- **Response**: `CameraRecoveryResponse`
## Recommended Troubleshooting Workflow
When a camera has issues, follow this order:
1. **Test Connection** - Diagnose the problem
```http
POST http://localhost:8000/cameras/camera1/test-connection
```
2. **Try Reconnect** - Most common fix
```http
POST http://localhost:8000/cameras/camera1/reconnect
```
3. **Restart Grab** - If reconnect doesn't work
```http
POST http://localhost:8000/cameras/camera1/restart-grab
```
4. **Full Reset** - For persistent issues
```http
POST http://localhost:8000/cameras/camera1/full-reset
```
5. **Reinitialize** - For cameras that never worked
```http
POST http://localhost:8000/cameras/camera1/reinitialize
```
## Response Format
All recovery operations return structured responses:
### CameraTestResponse
```json
{
"success": true,
"message": "Camera camera1 connection test passed",
"camera_name": "camera1",
"timestamp": "2024-01-01T12:00:00"
}
```
### CameraRecoveryResponse
```json
{
"success": true,
"message": "Camera camera1 reconnected successfully",
"camera_name": "camera1",
"operation": "reconnect",
"timestamp": "2024-01-01T12:00:00"
}
```
## Implementation Details
### CameraRecorder Methods
- `test_connection()`: Tests camera connection
- `reconnect()`: Performs soft reconnection
- `restart_grab()`: Restarts grab process
- `reset_timestamp()`: Resets timestamp
- `full_reset()`: Complete reset with cleanup and reinitialization
### CameraManager Methods
- `test_camera_connection(camera_name)`: Test specific camera
- `reconnect_camera(camera_name)`: Reconnect specific camera
- `restart_camera_grab(camera_name)`: Restart grab for specific camera
- `reset_camera_timestamp(camera_name)`: Reset timestamp for specific camera
- `full_reset_camera(camera_name)`: Full reset for specific camera
- `reinitialize_failed_camera(camera_name)`: Reinitialize failed camera
### State Management
All recovery operations automatically update the camera status in the state manager:
- Success: Status set to "connected"
- Failure: Status set to appropriate error state with error message
## Error Handling
The system includes comprehensive error handling:
- SDK exceptions are caught and logged
- State manager is updated with error information
- Proper HTTP status codes are returned
- Detailed error messages are provided
## Testing
Use the provided test files:
- `api-tests.http`: Manual API testing with VS Code REST Client
- `test_camera_recovery_api.py`: Automated testing script
## Safety Features
- Recording is automatically stopped before recovery operations
- Camera resources are properly cleaned up
- Thread-safe operations with proper locking
- Graceful error handling prevents system crashes
## Common Use Cases
1. **Camera Lost Connection**: Use reconnect
2. **Camera Won't Capture**: Use restart-grab
3. **Camera Initialization Failed**: Use reinitialize
4. **Persistent Issues**: Use full-reset
5. **Timing Problems**: Use reset-timestamp
This recovery system provides robust tools to handle most camera-related issues without requiring system restart or manual intervention.

View File

@@ -1,187 +0,0 @@
# MQTT Console Logging & API Guide
## 🎯 Overview
Your USDA Vision Camera System now has **enhanced MQTT console logging** and **comprehensive API endpoints** for monitoring machine status via MQTT.
## ✨ What's New
### 1. **Enhanced Console Logging**
- **Colorful emoji-based console output** for all MQTT events
- **Real-time visibility** of MQTT connections, subscriptions, and messages
- **Clear status indicators** for debugging and monitoring
### 2. **New MQTT Status API Endpoint**
- **GET /mqtt/status** - Detailed MQTT client statistics
- **Message counts, error tracking, uptime monitoring**
- **Real-time connection status and broker information**
### 3. **Existing Machine Status APIs** (already available)
- **GET /machines** - All machine states from MQTT
- **GET /system/status** - Overall system status including MQTT
## 🖥️ Console Logging Examples
When you run the system, you'll see:
```bash
🔗 MQTT CONNECTED: 192.168.1.110:1883
📋 MQTT SUBSCRIBED: vibratory_conveyor → vision/vibratory_conveyor/state
📋 MQTT SUBSCRIBED: blower_separator → vision/blower_separator/state
📡 MQTT MESSAGE: vibratory_conveyor → on
📡 MQTT MESSAGE: blower_separator → off
⚠️ MQTT DISCONNECTED: Unexpected disconnection (code: 1)
🔗 MQTT CONNECTED: 192.168.1.110:1883
```
## 🌐 API Endpoints
### MQTT Status
```http
GET http://localhost:8000/mqtt/status
```
**Response:**
```json
{
"connected": true,
"broker_host": "192.168.1.110",
"broker_port": 1883,
"subscribed_topics": [
"vision/vibratory_conveyor/state",
"vision/blower_separator/state"
],
"last_message_time": "2025-07-28T12:00:00",
"message_count": 42,
"error_count": 0,
"uptime_seconds": 3600.5
}
```
### Machine Status
```http
GET http://localhost:8000/machines
```
**Response:**
```json
{
"vibratory_conveyor": {
"name": "vibratory_conveyor",
"state": "on",
"last_updated": "2025-07-28T12:00:00",
"last_message": "on",
"mqtt_topic": "vision/vibratory_conveyor/state"
},
"blower_separator": {
"name": "blower_separator",
"state": "off",
"last_updated": "2025-07-28T12:00:00",
"last_message": "off",
"mqtt_topic": "vision/blower_separator/state"
}
}
```
### System Status
```http
GET http://localhost:8000/system/status
```
**Response:**
```json
{
"system_started": true,
"mqtt_connected": true,
"last_mqtt_message": "2025-07-28T12:00:00",
"machines": { ... },
"cameras": { ... },
"active_recordings": 0,
"total_recordings": 5,
"uptime_seconds": 3600.5
}
```
## 🚀 How to Use
### 1. **Start the Full System**
```bash
python main.py
```
You'll see enhanced console logging for all MQTT events.
### 2. **Test MQTT Demo (MQTT only)**
```bash
python demo_mqtt_console.py
```
Shows just the MQTT client with enhanced logging.
### 3. **Test API Endpoints**
```bash
python test_mqtt_logging.py
```
Tests all the API endpoints and shows expected responses.
### 4. **Query APIs Directly**
```bash
# Check MQTT status
curl http://localhost:8000/mqtt/status
# Check machine states
curl http://localhost:8000/machines
# Check overall system status
curl http://localhost:8000/system/status
```
## 🔧 Configuration
The MQTT settings are in `config.json`:
```json
{
"mqtt": {
"broker_host": "192.168.1.110",
"broker_port": 1883,
"username": null,
"password": null,
"topics": {
"vibratory_conveyor": "vision/vibratory_conveyor/state",
"blower_separator": "vision/blower_separator/state"
}
}
}
```
## 🎨 Console Output Features
- **🔗 Connection Events**: Green for successful connections
- **📋 Subscriptions**: Blue for topic subscriptions
- **📡 Messages**: Real-time message display with machine name and payload
- **⚠️ Warnings**: Yellow for unexpected disconnections
- **❌ Errors**: Red for connection failures and errors
- **❓ Unknown Topics**: Purple for unrecognized MQTT topics
## 📊 Monitoring & Debugging
### Real-time Monitoring
- **Console**: Watch live MQTT events as they happen
- **API**: Query `/mqtt/status` for statistics and health
- **Logs**: Check `usda_vision_system.log` for detailed logs
### Troubleshooting
1. **No MQTT messages?** Check broker connectivity and topic configuration
2. **Connection issues?** Verify broker host/port in config.json
3. **API not responding?** Ensure the system is running with `python main.py`
## 🎯 Use Cases
1. **Development**: See MQTT messages in real-time while developing
2. **Debugging**: Identify connection issues and message patterns
3. **Monitoring**: Use APIs to build dashboards or monitoring tools
4. **Integration**: Query machine states from external applications
5. **Maintenance**: Track MQTT statistics and error rates
---
**🎉 Your MQTT monitoring is now fully enhanced with both console logging and comprehensive APIs!**

View File

@@ -1,240 +0,0 @@
# 🎥 USDA Vision Camera Live Streaming Guide
This guide explains how to use the new live preview streaming functionality that allows you to view camera feeds in real-time without blocking recording operations.
## 🌟 Key Features
- **Non-blocking streaming**: Live preview doesn't interfere with recording
- **Separate camera connections**: Streaming uses independent camera instances
- **MJPEG streaming**: Standard web-compatible video streaming
- **Multiple concurrent viewers**: Multiple browsers can view the same stream
- **REST API control**: Start/stop streaming via API endpoints
- **Web interface**: Ready-to-use HTML interface for live preview
## 🏗️ Architecture
The streaming system creates separate camera connections for preview that are independent from recording:
```
Camera Hardware
├── Recording Connection (CameraRecorder)
│ ├── Used for video file recording
│ ├── Triggered by MQTT machine states
│ └── High quality, full FPS
└── Streaming Connection (CameraStreamer)
├── Used for live preview
├── Controlled via API endpoints
└── Optimized for web viewing (lower FPS, JPEG compression)
```
## 🚀 Quick Start
### 1. Start the System
```bash
python main.py
```
### 2. Open the Web Interface
Open `camera_preview.html` in your browser and click "Start Stream" for any camera.
### 3. API Usage
```bash
# Start streaming for camera1
curl -X POST http://localhost:8000/cameras/camera1/start-stream
# View live stream (open in browser)
http://localhost:8000/cameras/camera1/stream
# Stop streaming
curl -X POST http://localhost:8000/cameras/camera1/stop-stream
```
## 📡 API Endpoints
### Start Streaming
```http
POST /cameras/{camera_name}/start-stream
```
**Response:**
```json
{
"success": true,
"message": "Started streaming for camera camera1"
}
```
### Stop Streaming
```http
POST /cameras/{camera_name}/stop-stream
```
**Response:**
```json
{
"success": true,
"message": "Stopped streaming for camera camera1"
}
```
### Live Stream (MJPEG)
```http
GET /cameras/{camera_name}/stream
```
**Response:** Multipart MJPEG stream
**Content-Type:** `multipart/x-mixed-replace; boundary=frame`
## 🌐 Web Interface Usage
The included `camera_preview.html` provides a complete web interface:
1. **Camera Grid**: Shows all configured cameras
2. **Stream Controls**: Start/Stop/Refresh buttons for each camera
3. **Live Preview**: Real-time video feed display
4. **Status Information**: System and camera status
5. **Responsive Design**: Works on desktop and mobile
### Features:
- ✅ Real-time camera status
- ✅ One-click stream start/stop
- ✅ Automatic stream refresh
- ✅ System health monitoring
- ✅ Error handling and status messages
## 🔧 Technical Details
### Camera Streamer Configuration
- **Preview FPS**: 10 FPS (configurable)
- **JPEG Quality**: 70% (configurable)
- **Frame Buffer**: 5 frames (prevents memory buildup)
- **Timeout**: 200ms per frame capture
### Memory Management
- Automatic frame buffer cleanup
- Queue-based frame management
- Proper camera resource cleanup on stop
### Thread Safety
- Thread-safe streaming operations
- Independent from recording threads
- Proper synchronization with locks
## 🧪 Testing
### Run the Test Script
```bash
python test_streaming.py
```
This will test:
- ✅ API endpoint functionality
- ✅ Stream start/stop operations
- ✅ Concurrent recording and streaming
- ✅ Error handling
### Manual Testing
1. Start the system: `python main.py`
2. Open `camera_preview.html` in browser
3. Start streaming for a camera
4. Trigger recording via MQTT or manual API
5. Verify both work simultaneously
## 🔄 Concurrent Operations
The system supports these concurrent operations:
| Operation | Recording | Streaming | Notes |
|-----------|-----------|-----------|-------|
| Recording Only | ✅ | ❌ | Normal operation |
| Streaming Only | ❌ | ✅ | Preview without recording |
| Both Concurrent | ✅ | ✅ | **Independent connections** |
### Example: Concurrent Usage
```bash
# Start streaming
curl -X POST http://localhost:8000/cameras/camera1/start-stream
# Start recording (while streaming continues)
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"filename": "test_recording.avi"}'
# Both operations run independently!
```
## 🛠️ Configuration
### Stream Settings (in CameraStreamer)
```python
self.preview_fps = 10.0 # Lower FPS for preview
self.preview_quality = 70 # JPEG quality (1-100)
self._frame_queue.maxsize = 5 # Frame buffer size
```
### Camera Settings
The streamer uses the same camera configuration as recording:
- Exposure time from `camera_config.exposure_ms`
- Gain from `camera_config.gain`
- Optimized trigger mode for continuous streaming
## 🚨 Important Notes
### Camera Access Patterns
- **Recording**: Blocks camera during active recording
- **Streaming**: Uses separate connection, doesn't block
- **Health Checks**: Brief, non-blocking camera tests
- **Multiple Streams**: Multiple browsers can view same stream
### Performance Considerations
- Streaming uses additional CPU/memory resources
- Lower preview FPS reduces system load
- JPEG compression reduces bandwidth usage
- Frame queue prevents memory buildup
### Error Handling
- Automatic camera resource cleanup
- Graceful handling of camera disconnections
- Stream auto-restart capabilities
- Detailed error logging
## 🔍 Troubleshooting
### Stream Not Starting
1. Check camera availability: `GET /cameras`
2. Verify camera not in error state
3. Check system logs for camera initialization errors
4. Try camera reconnection: `POST /cameras/{name}/reconnect`
### Poor Stream Quality
1. Adjust `preview_quality` setting (higher = better quality)
2. Increase `preview_fps` for smoother video
3. Check network bandwidth
4. Verify camera exposure/gain settings
### Browser Issues
1. Try different browser (Chrome/Firefox recommended)
2. Check browser console for JavaScript errors
3. Verify CORS settings in API server
4. Clear browser cache and refresh
## 📈 Future Enhancements
Potential improvements for the streaming system:
- 🔄 WebRTC support for lower latency
- 📱 Mobile app integration
- 🎛️ Real-time camera setting adjustments
- 📊 Stream analytics and monitoring
- 🔐 Authentication and access control
- 🌐 Multi-camera synchronized viewing
## 📞 Support
For issues with streaming functionality:
1. Check the system logs: `usda_vision_system.log`
2. Run the test script: `python test_streaming.py`
3. Verify API health: `http://localhost:8000/health`
4. Check camera status: `http://localhost:8000/cameras`
---
**✅ Live streaming is now ready for production use!**

View File

@@ -1,146 +0,0 @@
# GigE Camera Image Capture
This project provides simple Python scripts to connect to a GigE camera and capture images using the provided SDK.
## Files Overview
### Demo Files (provided with camera)
- `python demo/mvsdk.py` - Main SDK wrapper library
- `python demo/grab.py` - Basic image capture example
- `python demo/cv_grab.py` - OpenCV-based continuous capture
- `python demo/cv_grab_callback.py` - Callback-based capture
- `python demo/readme.txt` - Original demo documentation
### Custom Scripts
- `camera_capture.py` - Standalone script to capture 10 images with 200ms intervals
- `test.ipynb` - Jupyter notebook with the same functionality
- `images/` - Directory where captured images are saved
## Features
- **Automatic camera detection** - Finds and connects to available GigE cameras
- **Configurable capture** - Currently set to capture 10 images with 200ms intervals
- **Both mono and color support** - Automatically detects camera type
- **Timestamped filenames** - Images saved with date/time stamps
- **Error handling** - Robust error handling for camera operations
- **Cross-platform** - Works on Windows and Linux (with appropriate image flipping)
## Requirements
- Python 3.x
- OpenCV (`cv2`)
- NumPy
- Matplotlib (for Jupyter notebook display)
- GigE camera SDK (MVSDK) - included in `python demo/` directory
## Usage
### Option 1: Standalone Script
Run the standalone Python script:
```bash
python camera_capture.py
```
This will:
1. Initialize the camera SDK
2. Detect available cameras
3. Connect to the first camera found
4. Configure camera settings (manual exposure, continuous mode)
5. Capture 10 images with 200ms intervals
6. Save images to the `images/` directory
7. Clean up and close the camera
### Option 2: Jupyter Notebook
Open and run the `test.ipynb` notebook:
```bash
jupyter notebook test.ipynb
```
The notebook provides the same functionality but with:
- Step-by-step execution
- Detailed explanations
- Visual display of the last captured image
- Better error reporting
## Camera Configuration
The scripts are configured with the following default settings:
- **Trigger Mode**: Continuous capture (mode 0)
- **Exposure**: Manual, 30ms
- **Output Format**:
- Monochrome cameras: MONO8
- Color cameras: BGR8
- **Image Processing**: Automatic ISP processing from RAW to RGB/MONO
## Output
Images are saved in the `images/` directory with the following naming convention:
```
image_XX_YYYYMMDD_HHMMSS_mmm.jpg
```
Where:
- `XX` = Image number (01-10)
- `YYYYMMDD_HHMMSS_mmm` = Timestamp with milliseconds
Example: `image_01_20250722_140530_123.jpg`
## Troubleshooting
### Common Issues
1. **"No camera was found!"**
- Check camera connection (Ethernet cable)
- Verify camera power
- Check network settings (camera and PC should be on same subnet)
- Ensure camera drivers are installed
2. **"CameraInit Failed"**
- Camera might be in use by another application
- Check camera permissions
- Try restarting the camera or PC
3. **"Failed to capture image"**
- Check camera settings
- Verify sufficient lighting
- Check exposure settings
4. **Images appear upside down**
- This is handled automatically on Windows
- Linux users may need to adjust the flip settings
### Network Configuration
For GigE cameras, ensure:
- Camera and PC are on the same network segment
- PC network adapter supports Jumbo frames (recommended)
- Firewall allows camera communication
- Sufficient network bandwidth
## Customization
You can modify the scripts to:
- **Change capture count**: Modify the range in the capture loop
- **Adjust timing**: Change the `time.sleep(0.2)` value
- **Modify exposure**: Change the exposure time parameter
- **Change output format**: Modify file format and quality settings
- **Add image processing**: Insert processing steps before saving
## SDK Reference
The camera SDK (`mvsdk.py`) provides extensive functionality:
- Camera enumeration and initialization
- Image capture and processing
- Parameter configuration (exposure, gain, etc.)
- Trigger modes and timing
- Image format conversion
- Error handling
Refer to the original SDK documentation for advanced features.

View File

@@ -1,184 +0,0 @@
# USDA Vision Camera System - Implementation Summary
## 🎉 Project Completed Successfully!
The USDA Vision Camera System has been fully implemented and tested. All components are working correctly and the system is ready for deployment.
## ✅ What Was Built
### Core Architecture
- **Modular Design**: Clean separation of concerns across multiple modules
- **Multi-threading**: Concurrent MQTT listening, camera monitoring, and recording
- **Event-driven**: Thread-safe communication between components
- **Configuration-driven**: JSON-based configuration system
### Key Components
1. **MQTT Integration** (`usda_vision_system/mqtt/`)
- Listens to two machine topics: `vision/vibratory_conveyor/state` and `vision/blower_separator/state`
- Thread-safe message handling with automatic reconnection
- State normalization (on/off/error)
2. **Camera Management** (`usda_vision_system/camera/`)
- Automatic GigE camera discovery using python demo library
- Periodic status monitoring (every 2 seconds)
- Camera initialization and configuration management
- **Discovered Cameras**:
- Blower-Yield-Cam (192.168.1.165)
- Cracker-Cam (192.168.1.167)
3. **Video Recording** (`usda_vision_system/camera/recorder.py`)
- Automatic recording start/stop based on machine states
- Timestamp-based file naming: `camera1_recording_20250726_143022.avi`
- Configurable FPS, exposure, and gain settings
- Thread-safe recording with proper cleanup
4. **Storage Management** (`usda_vision_system/storage/`)
- Organized file storage under `./storage/camera1/` and `./storage/camera2/`
- File indexing and metadata tracking
- Automatic cleanup of old files
- Storage statistics and integrity checking
5. **REST API Server** (`usda_vision_system/api/`)
- FastAPI server on port 8000
- Real-time WebSocket updates
- Manual recording control endpoints
- System status and monitoring endpoints
6. **Comprehensive Logging** (`usda_vision_system/core/logging_config.py`)
- Colored console output
- Rotating log files
- Component-specific log levels
- Performance monitoring and error tracking
## 🚀 How to Use
### Quick Start
```bash
# Run system tests
python test_system.py
# Start the system
python main.py
# Or use the startup script
./start_system.sh
```
### Configuration
Edit `config.json` to customize:
- MQTT broker settings
- Camera configurations
- Storage paths
- System parameters
### API Access
- System status: `http://localhost:8000/system/status`
- Camera status: `http://localhost:8000/cameras`
- Manual recording: `POST http://localhost:8000/cameras/camera1/start-recording`
- Real-time updates: WebSocket at `ws://localhost:8000/ws`
## 📊 Test Results
All system tests passed successfully:
- ✅ Module imports
- ✅ Configuration loading
- ✅ Camera discovery (found 2 cameras)
- ✅ Storage setup
- ✅ MQTT configuration
- ✅ System initialization
- ✅ API endpoints
## 🔧 System Behavior
### Automatic Recording Flow
1. **Machine turns ON** → MQTT message received → Recording starts automatically
2. **Machine turns OFF** → MQTT message received → Recording stops and saves file
3. **Files saved** with timestamp: `camera1_recording_YYYYMMDD_HHMMSS.avi`
### Manual Control
- Start/stop recording via API calls
- Monitor system status in real-time
- Check camera availability on demand
### Dashboard Integration
The system is designed to integrate with your React + Vite + Tailwind + Supabase dashboard:
- REST API for status queries
- WebSocket for real-time updates
- JSON responses for easy frontend consumption
## 📁 Project Structure
```
usda_vision_system/
├── core/ # Configuration, state management, events, logging
├── mqtt/ # MQTT client and message handlers
├── camera/ # Camera management, monitoring, recording
├── storage/ # File organization and management
├── api/ # FastAPI server and WebSocket support
└── main.py # Application coordinator
Supporting Files:
├── main.py # Entry point script
├── config.json # System configuration
├── test_system.py # Test suite
├── start_system.sh # Startup script
└── README_SYSTEM.md # Comprehensive documentation
```
## 🎯 Key Features Delivered
- ✅ **Dual MQTT topic listening** for two machines
- ✅ **Automatic camera recording** triggered by machine states
- ✅ **GigE camera support** using python demo library
- ✅ **Thread-safe multi-tasking** (MQTT + camera monitoring + recording)
- ✅ **Timestamp-based file naming** in organized directories
- ✅ **2-second camera status monitoring** with on-demand checks
- ✅ **REST API and WebSocket** for dashboard integration
- ✅ **Comprehensive logging** with error tracking
- ✅ **Configuration management** via JSON
- ✅ **Storage management** with cleanup capabilities
- ✅ **Graceful startup/shutdown** with signal handling
## 🔮 Ready for Dashboard Integration
The system provides everything needed for your React dashboard:
```javascript
// Example API usage
const systemStatus = await fetch('http://localhost:8000/system/status');
const cameras = await fetch('http://localhost:8000/cameras');
// WebSocket for real-time updates
const ws = new WebSocket('ws://localhost:8000/ws');
ws.onmessage = (event) => {
const update = JSON.parse(event.data);
// Handle real-time system updates
};
// Manual recording control
await fetch('http://localhost:8000/cameras/camera1/start-recording', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ camera_name: 'camera1' })
});
```
## 🎊 Next Steps
The system is production-ready! You can now:
1. **Deploy** the system on your target hardware
2. **Integrate** with your existing React dashboard
3. **Configure** MQTT topics and camera settings as needed
4. **Monitor** system performance through logs and API endpoints
5. **Extend** functionality as requirements evolve
The modular architecture makes it easy to add new features, cameras, or MQTT topics in the future.
---
**System Status**: ✅ **FULLY OPERATIONAL**
**Test Results**: ✅ **ALL TESTS PASSING**
**Cameras Detected**: ✅ **2 GIGE CAMERAS READY**
**Ready for Production**: ✅ **YES**

View File

@@ -1 +0,0 @@
# USDA-Vision-Cameras

View File

@@ -1,249 +0,0 @@
# USDA Vision Camera System
A comprehensive system for monitoring machines via MQTT and automatically recording video from GigE cameras when machines are active.
## Overview
This system integrates MQTT machine monitoring with automated video recording from GigE cameras. When a machine turns on (detected via MQTT), the system automatically starts recording from the associated camera. When the machine turns off, recording stops and the video is saved with a timestamp.
## Features
- **MQTT Integration**: Listens to multiple machine state topics
- **Automatic Recording**: Starts/stops recording based on machine states
- **GigE Camera Support**: Uses the python demo library (mvsdk) for camera control
- **Multi-threading**: Concurrent MQTT listening, camera monitoring, and recording
- **REST API**: FastAPI server for dashboard integration
- **WebSocket Support**: Real-time status updates
- **Storage Management**: Organized file storage with cleanup capabilities
- **Comprehensive Logging**: Detailed logging with rotation and error tracking
- **Configuration Management**: JSON-based configuration system
## Architecture
```
┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐
│ MQTT Broker │ │ GigE Camera │ │ Dashboard │
│ │ │ │ │ (React) │
└─────────┬───────┘ └─────────┬───────┘ └─────────┬───────┘
│ │ │
│ Machine States │ Video Streams │ API Calls
│ │ │
┌─────────▼──────────────────────▼──────────────────────▼───────┐
│ USDA Vision Camera System │
├───────────────────────────────────────────────────────────────┤
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ MQTT Client │ │ Camera │ │ API Server │ │
│ │ │ │ Manager │ │ │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │
│ │ State │ │ Storage │ │ Event │ │
│ │ Manager │ │ Manager │ │ System │ │
│ └─────────────┘ └─────────────┘ └─────────────┘ │
└───────────────────────────────────────────────────────────────┘
```
## Installation
1. **Prerequisites**:
- Python 3.11+
- GigE cameras with python demo library
- MQTT broker (e.g., Mosquitto)
- uv package manager (recommended)
2. **Install Dependencies**:
```bash
uv sync
```
3. **Setup Storage Directory**:
```bash
sudo mkdir -p /storage
sudo chown $USER:$USER /storage
```
## Configuration
Edit `config.json` to configure your system:
```json
{
"mqtt": {
"broker_host": "192.168.1.110",
"broker_port": 1883,
"topics": {
"vibratory_conveyor": "vision/vibratory_conveyor/state",
"blower_separator": "vision/blower_separator/state"
}
},
"cameras": [
{
"name": "camera1",
"machine_topic": "vibratory_conveyor",
"storage_path": "/storage/camera1",
"exposure_ms": 1.0,
"gain": 3.5,
"target_fps": 3.0,
"enabled": true
}
]
}
```
## Usage
### Basic Usage
1. **Start the System**:
```bash
python main.py
```
2. **With Custom Config**:
```bash
python main.py --config my_config.json
```
3. **Debug Mode**:
```bash
python main.py --log-level DEBUG
```
### API Endpoints
The system provides a REST API on port 8000:
- `GET /system/status` - Overall system status
- `GET /cameras` - All camera statuses
- `GET /machines` - All machine states
- `POST /cameras/{name}/start-recording` - Manual recording start
- `POST /cameras/{name}/stop-recording` - Manual recording stop
- `GET /storage/stats` - Storage statistics
- `WebSocket /ws` - Real-time updates
### Dashboard Integration
The system is designed to integrate with your existing React + Vite + Tailwind + Supabase dashboard:
1. **API Integration**: Use the REST endpoints to display system status
2. **WebSocket**: Connect to `/ws` for real-time updates
3. **Supabase Storage**: Store recording metadata and system logs
## File Organization
```
/storage/
├── camera1/
│ ├── camera1_recording_20250726_143022.avi
│ └── camera1_recording_20250726_143155.avi
├── camera2/
│ ├── camera2_recording_20250726_143025.avi
│ └── camera2_recording_20250726_143158.avi
└── file_index.json
```
## Monitoring and Logging
### Log Files
- `usda_vision_system.log` - Main system log (rotated)
- Console output with colored formatting
- Component-specific log levels
### Performance Monitoring
The system includes built-in performance monitoring:
- Startup times
- Recording session metrics
- MQTT message processing rates
- Camera status check intervals
### Error Tracking
Comprehensive error tracking with:
- Error counts per component
- Detailed error context
- Automatic recovery attempts
## Troubleshooting
### Common Issues
1. **Camera Not Found**:
- Check camera connections
- Verify python demo library installation
- Run camera discovery: Check logs for enumeration results
2. **MQTT Connection Failed**:
- Verify broker IP and port
- Check network connectivity
- Verify credentials if authentication is enabled
3. **Recording Fails**:
- Check storage permissions
- Verify available disk space
- Check camera initialization logs
4. **API Server Won't Start**:
- Check if port 8000 is available
- Verify FastAPI dependencies
- Check firewall settings
### Debug Commands
```bash
# Check system status
curl http://localhost:8000/system/status
# Check camera status
curl http://localhost:8000/cameras
# Manual recording start
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"camera_name": "camera1"}'
```
## Development
### Project Structure
```
usda_vision_system/
├── core/ # Core functionality
├── mqtt/ # MQTT client and handlers
├── camera/ # Camera management and recording
├── storage/ # File management
├── api/ # FastAPI server
└── main.py # Application coordinator
```
### Adding New Features
1. **New Camera Type**: Extend `camera/recorder.py`
2. **New MQTT Topics**: Update `config.json` and `mqtt/handlers.py`
3. **New API Endpoints**: Add to `api/server.py`
4. **New Events**: Define in `core/events.py`
### Testing
```bash
# Run basic system test
python -c "from usda_vision_system import USDAVisionSystem; s = USDAVisionSystem(); print('OK')"
# Test MQTT connection
python -c "from usda_vision_system.mqtt.client import MQTTClient; # ... test code"
# Test camera discovery
python -c "import sys; sys.path.append('python demo'); import mvsdk; print(len(mvsdk.CameraEnumerateDevice()))"
```
## License
This project is developed for USDA research purposes.
## Support
For issues and questions:
1. Check the logs in `usda_vision_system.log`
2. Review the troubleshooting section
3. Check API status at `http://localhost:8000/health`

View File

@@ -1,190 +0,0 @@
# Time Synchronization Setup - Atlanta, Georgia
## ✅ Time Synchronization Complete!
The USDA Vision Camera System has been configured for proper time synchronization with Atlanta, Georgia (Eastern Time Zone).
## 🕐 What Was Implemented
### System-Level Time Configuration
- **Timezone**: Set to `America/New_York` (Eastern Time)
- **Current Status**: Eastern Daylight Time (EDT, UTC-4)
- **NTP Sync**: Configured with multiple reliable time servers
- **Hardware Clock**: Synchronized with system time
### Application-Level Timezone Support
- **Timezone-Aware Timestamps**: All recordings use Atlanta time
- **Automatic DST Handling**: Switches between EST/EDT automatically
- **Time Sync Monitoring**: Built-in time synchronization checking
- **Consistent Formatting**: Standardized timestamp formats throughout
## 🔧 Key Features
### 1. Automatic Time Synchronization
```bash
# NTP servers configured:
- time.nist.gov (NIST atomic clock)
- pool.ntp.org (NTP pool)
- time.google.com (Google time)
- time.cloudflare.com (Cloudflare time)
```
### 2. Timezone-Aware Recording Filenames
```
Example: camera1_recording_20250725_213241.avi
Format: {camera}_{type}_{YYYYMMDD_HHMMSS}.avi
Time: Atlanta local time (EDT/EST)
```
### 3. Time Verification Tools
- **Startup Check**: Automatic time sync verification on system start
- **Manual Check**: `python check_time.py` for on-demand verification
- **API Integration**: Time sync status available via REST API
### 4. Comprehensive Logging
```
=== TIME SYNCHRONIZATION STATUS ===
System time: 2025-07-25 21:32:41 EDT
Timezone: EDT (-0400)
Daylight Saving: Yes
Sync status: synchronized
Time difference: 0.10 seconds
=====================================
```
## 🚀 Usage
### Automatic Operation
The system automatically:
- Uses Atlanta time for all timestamps
- Handles daylight saving time transitions
- Monitors time synchronization status
- Logs time-related events
### Manual Verification
```bash
# Check time synchronization
python check_time.py
# Test timezone functions
python test_timezone.py
# View system time status
timedatectl status
```
### API Endpoints
```bash
# System status includes time info
curl http://localhost:8000/system/status
# Example response includes:
{
"system_started": true,
"uptime_seconds": 3600,
"timestamp": "2025-07-25T21:32:41-04:00"
}
```
## 📊 Current Status
### Time Synchronization
-**System Timezone**: America/New_York (EDT)
-**NTP Sync**: Active and synchronized
-**Time Accuracy**: Within 0.1 seconds of atomic time
-**DST Support**: Automatic EST/EDT switching
### Application Integration
-**Recording Timestamps**: Atlanta time zone
-**Log Timestamps**: Timezone-aware logging
-**API Responses**: ISO format with timezone
-**File Naming**: Consistent Atlanta time format
### Monitoring
-**Startup Verification**: Time sync checked on boot
-**Continuous Monitoring**: Built-in sync status tracking
-**Error Detection**: Alerts for time drift issues
-**Manual Tools**: On-demand verification scripts
## 🔍 Technical Details
### Timezone Configuration
```json
{
"system": {
"timezone": "America/New_York"
}
}
```
### Time Sources
1. **Primary**: NIST atomic clock (time.nist.gov)
2. **Secondary**: NTP pool servers (pool.ntp.org)
3. **Backup**: Google/Cloudflare time servers
4. **Fallback**: Local system clock
### File Naming Convention
```
Pattern: {camera_name}_recording_{YYYYMMDD_HHMMSS}.avi
Example: camera1_recording_20250725_213241.avi
Timezone: Always Atlanta local time (EST/EDT)
```
## 🎯 Benefits
### For Operations
- **Consistent Timestamps**: All recordings use Atlanta time
- **Easy Correlation**: Timestamps match local business hours
- **Automatic DST**: No manual timezone adjustments needed
- **Reliable Sync**: Multiple time sources ensure accuracy
### For Analysis
- **Local Time Context**: Recordings timestamped in business timezone
- **Accurate Sequencing**: Precise timing for event correlation
- **Standard Format**: Consistent naming across all recordings
- **Audit Trail**: Complete time synchronization logging
### For Integration
- **Dashboard Ready**: Timezone-aware API responses
- **Database Compatible**: ISO format timestamps with timezone
- **Log Analysis**: Structured time information in logs
- **Monitoring**: Built-in time sync health checks
## 🔧 Maintenance
### Regular Checks
The system automatically:
- Verifies time sync on startup
- Logs time synchronization status
- Monitors for time drift
- Alerts on sync failures
### Manual Maintenance
```bash
# Force time sync
sudo systemctl restart systemd-timesyncd
# Check NTP status
timedatectl show-timesync --all
# Verify timezone
timedatectl status
```
## 📈 Next Steps
The time synchronization is now fully operational. The system will:
1. **Automatically maintain** accurate Atlanta time
2. **Generate timestamped recordings** with local time
3. **Monitor sync status** and alert on issues
4. **Provide timezone-aware** API responses for dashboard integration
All recording files will now have accurate Atlanta timestamps, making it easy to correlate with local business operations and machine schedules.
---
**Time Sync Status**: ✅ **SYNCHRONIZED**
**Timezone**: ✅ **America/New_York (EDT)**
**Accuracy**: ✅ **±0.1 seconds**
**Ready for Production**: ✅ **YES**

View File

@@ -1,191 +0,0 @@
# Camera Video Recorder
A Python script for recording videos from GigE cameras using the provided SDK with custom exposure and gain settings.
## Features
- **List all available cameras** - Automatically detects and displays all connected cameras
- **Custom camera settings** - Set exposure time to 1ms and gain to 3.5x (or custom values)
- **Video recording** - Record videos in AVI format with timestamp filenames
- **Live preview** - Test camera functionality with live preview mode
- **Interactive menu** - User-friendly menu system for all operations
- **Automatic cleanup** - Proper resource management and cleanup
## Requirements
- Python 3.x
- OpenCV (`cv2`)
- NumPy
- Camera SDK (mvsdk) - included in `python demo` directory
- GigE camera connected to the system
## Installation
1. Ensure your GigE camera is connected and properly configured
2. Make sure the `python demo` directory with `mvsdk.py` is present
3. Install required Python packages:
```bash
pip install opencv-python numpy
```
## Usage
### Basic Usage
Run the script:
```bash
python camera_video_recorder.py
```
The script will:
1. Display a welcome message and feature overview
2. List all available cameras
3. Let you select a camera (if multiple are available)
4. Allow you to set custom exposure and gain values
5. Present an interactive menu with options
### Menu Options
1. **Start Recording** - Begin video recording with timestamp filename
2. **List Camera Info** - Display detailed camera information
3. **Test Camera (Live Preview)** - View live camera feed without recording
4. **Exit** - Clean up and exit the program
### Default Settings
- **Exposure Time**: 1.0ms (1000 microseconds)
- **Gain**: 3.5x
- **Video Format**: AVI with XVID codec
- **Frame Rate**: 30 FPS
- **Output Directory**: `videos/` (created automatically)
### Recording Controls
- **Start Recording**: Select option 1 from the menu
- **Stop Recording**: Press 'q' in the preview window
- **Video Files**: Saved as `videos/camera_recording_YYYYMMDD_HHMMSS.avi`
## File Structure
```
camera_video_recorder.py # Main script
python demo/
mvsdk.py # Camera SDK wrapper
(other demo files)
videos/ # Output directory (created automatically)
camera_recording_*.avi # Recorded video files
```
## Script Features
### CameraVideoRecorder Class
- `list_cameras()` - Enumerate and display available cameras
- `initialize_camera()` - Set up camera with custom exposure and gain
- `start_recording()` - Initialize video writer and begin recording
- `stop_recording()` - Stop recording and save video file
- `record_loop()` - Main recording loop with live preview
- `cleanup()` - Proper resource cleanup
### Key Functions
- **Camera Detection**: Automatically finds all connected GigE cameras
- **Settings Validation**: Checks and clamps exposure/gain values to camera limits
- **Frame Processing**: Handles both monochrome and color cameras
- **Windows Compatibility**: Handles frame flipping for Windows systems
- **Error Handling**: Comprehensive error handling and user feedback
## Example Output
```
Camera Video Recorder
====================
This script allows you to:
- List all available cameras
- Record videos with custom exposure (1ms) and gain (3.5x) settings
- Save videos with timestamps
- Stop recording anytime with 'q' key
Found 1 camera(s):
0: GigE Camera Model (GigE) - SN: 12345678
Using camera: GigE Camera Model
Camera Settings:
Enter exposure time in ms (default 1.0): 1.0
Enter gain value (default 3.5): 3.5
Initializing camera with:
- Exposure: 1.0ms
- Gain: 3.5x
Camera type: Color
Set exposure time: 1000.0μs
Set analog gain: 3.50x (range: 1.00 - 16.00)
Camera started successfully
==================================================
Camera Video Recorder Menu
==================================================
1. Start Recording
2. List Camera Info
3. Test Camera (Live Preview)
4. Exit
Select option (1-4): 1
Started recording to: videos/camera_recording_20241223_143022.avi
Frame size: (1920, 1080), FPS: 30.0
Press 'q' to stop recording...
Recording... Press 'q' in the preview window to stop
Recording stopped!
Saved: videos/camera_recording_20241223_143022.avi
Frames recorded: 450
Duration: 15.2 seconds
Average FPS: 29.6
```
## Troubleshooting
### Common Issues
1. **"No cameras found!"**
- Check camera connection
- Verify camera power
- Ensure network configuration for GigE cameras
2. **"SDK initialization failed"**
- Verify `python demo/mvsdk.py` exists
- Check camera drivers are installed
3. **"Camera initialization failed"**
- Camera may be in use by another application
- Try disconnecting and reconnecting the camera
4. **Recording issues**
- Ensure sufficient disk space
- Check write permissions in the output directory
### Performance Tips
- Close other applications using the camera
- Ensure adequate system resources (CPU, RAM)
- Use SSD storage for better write performance
- Adjust frame rate if experiencing dropped frames
## Customization
You can modify the script to:
- Change video codec (currently XVID)
- Adjust target frame rate
- Modify output filename format
- Add additional camera settings
- Change preview window size
## Notes
- Videos are saved in the `videos/` directory with timestamp filenames
- The script handles both monochrome and color cameras automatically
- Frame flipping is handled automatically for Windows systems
- All resources are properly cleaned up on exit

View File

@@ -1,220 +0,0 @@
# 🎥 Camera Route Implementation Guide
This document explains the implementation of the new public camera live view routes (`/camera#/live`) that don't require authentication.
## 🚀 What Was Implemented
### 1. **LiveCameraView Component** (`src/components/LiveCameraView.tsx`)
- Displays live camera feed without authentication requirements
- Handles streaming start/stop automatically
- Provides error handling and loading states
- Full-screen live view with camera label and status indicator
### 2. **CameraRoute Component** (`src/components/CameraRoute.tsx`)
- Validates camera route parameters
- Ensures only valid camera numbers (camera1, camera2, etc.) are accepted
- Renders the LiveCameraView for valid routes
### 3. **Updated App.tsx**
- Added route pattern matching for `/camera#/live`
- Integrated camera routes into existing authentication flow
- Maintains backward compatibility with existing functionality
### 4. **Test Page** (`public/camera-test.html`)
- Simple HTML page to test camera routes
- Provides links to test different camera numbers
- Explains expected behavior
## 📋 Required Dependencies
The following packages need to be installed to complete the implementation:
```bash
# Install React Router
npm install react-router-dom
# Install TypeScript types
npm install --save-dev @types/react-router-dom
```
**Note:** Due to permission issues, these packages couldn't be installed automatically. You'll need to resolve the permissions or install them manually.
## 🔧 How to Complete the Setup
### Option 1: Fix Permissions and Install
```bash
# Fix node_modules permissions
sudo chown -R $USER:$USER node_modules
sudo chmod -R 755 node_modules
# Install dependencies
npm install
```
### Option 2: Manual Installation
```bash
# Remove problematic node_modules
rm -rf node_modules
# Reinstall everything
npm install
```
### Option 3: Use Yarn Instead
```bash
# Install yarn if not available
npm install -g yarn
# Install dependencies with yarn
yarn install
```
## 🧪 Testing the Implementation
### 1. **Start the Development Server**
```bash
npm run dev
```
### 2. **Test Camera Routes**
Open these URLs in your browser:
- `http://localhost:5173/camera1/live` - Live view of camera1
- `http://localhost:5173/camera2/live` - Live view of camera2
- `http://localhost:5173/camera3/live` - Live view of camera3
### 3. **Use the Test Page**
Open `http://localhost:5173/camera-test.html` to access the test interface.
### 4. **Expected Behavior**
-**Valid routes** should show live camera feed
-**Invalid routes** should show error message
- 🔒 **Protected routes** should redirect to login
## 🏗️ Architecture Details
### Route Pattern
```
/camera{number}/live
```
- `{number}` must be a positive integer
- Examples: `/camera1/live`, `/camera2/live`, `/camera10/live`
- Invalid: `/camera/live`, `/camera0/live`, `/camera-1/live`
### Component Flow
```
App.tsx → Route Detection → CameraRoute → LiveCameraView
```
### API Integration
The LiveCameraView component integrates with existing camera API endpoints:
- `POST /cameras/{camera_name}/start-stream` - Start streaming
- `GET /cameras/{camera_name}/stream` - Get MJPEG stream
- `POST /cameras/{camera_name}/stop-stream` - Stop streaming
## 🎯 Key Features
### ✅ **Public Access**
- No authentication required
- Anyone can view live camera feeds
- Perfect for monitoring displays
### ✅ **Non-Blocking Streaming**
- Uses existing CameraStreamer infrastructure
- Separate camera connections for streaming vs. recording
- Doesn't interfere with recording operations
### ✅ **Real-time Video**
- MJPEG format for low latency
- Automatic stream management
- Error handling and retry functionality
### ✅ **Responsive Design**
- Full-screen live view
- Camera identification labels
- Live status indicators
## 🔍 Troubleshooting
### Common Issues
#### 1. **Permission Errors During Installation**
```bash
# Fix ownership
sudo chown -R $USER:$USER .
# Fix permissions
sudo chmod -R 755 .
```
#### 2. **Camera Stream Not Loading**
- Check if camera API is running (`http://localhost:8000`)
- Verify camera configuration in `config.compose.json`
- Check browser console for errors
#### 3. **Route Not Working**
- Ensure React app is running
- Check browser console for routing errors
- Verify component imports are correct
#### 4. **TypeScript Errors**
- Install missing type definitions
- Check import paths
- Verify component interfaces
### Debug Steps
1. Check browser console for errors
2. Verify API endpoints are accessible
3. Test camera streaming directly via API
4. Check component rendering in React DevTools
## 🚀 Next Steps
### Immediate
1. Install required dependencies
2. Test basic functionality
3. Verify camera streaming works
### Future Enhancements
1. **Add React Router** for better routing
2. **Implement URL-based navigation** between cameras
3. **Add camera selection interface**
4. **Implement stream quality controls**
5. **Add recording controls** (if needed)
### Production Considerations
1. **Security**: Consider adding rate limiting
2. **Performance**: Optimize for multiple concurrent viewers
3. **Monitoring**: Add analytics and usage tracking
4. **Access Control**: Implement optional authentication if needed
## 📚 Related Documentation
- [Camera API Documentation](../camera-management-api/docs/API_DOCUMENTATION.md)
- [Streaming Guide](../camera-management-api/docs/guides/STREAMING_GUIDE.md)
- [Vision System README](VISION_SYSTEM_README.md)
## 🤝 Support
If you encounter issues:
1. Check the troubleshooting section above
2. Review browser console for error messages
3. Verify camera API is running and accessible
4. Test API endpoints directly with curl or Postman
---
**Implementation Status**: ✅ Components Created | ⚠️ Dependencies Pending | 🧪 Ready for Testing

View File

@@ -1,510 +0,0 @@
# New Database Schema Documentation
## Overview
The database has been restructured to support a more flexible experiment phase system. Each experiment can now have different combinations of phases (soaking, airdrying, cracking, shelling), and each phase has its own parameters stored in separate tables.
## Key Changes
### 1. Experiment Phases Table
The `experiment_phases` table now includes boolean flags to indicate which phases are enabled:
```sql
CREATE TABLE public.experiment_phases (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name TEXT NOT NULL UNIQUE,
description TEXT,
has_soaking BOOLEAN NOT NULL DEFAULT false,
has_airdrying BOOLEAN NOT NULL DEFAULT false,
has_cracking BOOLEAN NOT NULL DEFAULT false,
has_shelling BOOLEAN NOT NULL DEFAULT false,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Constraint**: At least one phase must be selected (`has_soaking = true OR has_airdrying = true OR has_cracking = true OR has_shelling = true`).
### 2. Machine Types Table
New table to support different cracking machines:
```sql
CREATE TABLE public.machine_types (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name TEXT NOT NULL UNIQUE,
description TEXT,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Default machine types**:
- JC Cracker
- Meyer Cracker
### 3. Phase-Specific Tables
#### Soaking Table
```sql
CREATE TABLE public.soaking (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_id UUID NOT NULL REFERENCES public.experiments(id) ON DELETE CASCADE,
repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
soaking_duration_minutes INTEGER NOT NULL CHECK (soaking_duration_minutes > 0),
scheduled_end_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Features**:
- `scheduled_end_time` is automatically calculated from `scheduled_start_time + soaking_duration_minutes`
- Can be associated with either an experiment (template) or a specific repetition
#### Airdrying Table
```sql
CREATE TABLE public.airdrying (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_id UUID NOT NULL REFERENCES public.experiments(id) ON DELETE CASCADE,
repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
duration_minutes INTEGER NOT NULL CHECK (duration_minutes > 0),
scheduled_end_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Features**:
- `scheduled_start_time` is automatically set to the soaking's `scheduled_end_time` if not provided
- `scheduled_end_time` is automatically calculated from `scheduled_start_time + duration_minutes`
#### Cracking Table
```sql
CREATE TABLE public.cracking (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_id UUID NOT NULL REFERENCES public.experiments(id) ON DELETE CASCADE,
repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
machine_type_id UUID NOT NULL REFERENCES public.machine_types(id),
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Features**:
- `scheduled_start_time` is automatically set to the airdrying's `scheduled_end_time` if not provided
- No duration or scheduled end time (user sets actual end time)
- Requires selection of machine type
#### Shelling Table
```sql
CREATE TABLE public.shelling (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_id UUID NOT NULL REFERENCES public.experiments(id) ON DELETE CASCADE,
repetition_id UUID REFERENCES public.experiment_repetitions(id) ON DELETE CASCADE,
scheduled_start_time TIMESTAMP WITH TIME ZONE NOT NULL,
actual_start_time TIMESTAMP WITH TIME ZONE,
actual_end_time TIMESTAMP WITH TIME ZONE,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
### 4. Machine-Specific Parameter Tables
#### JC Cracker Parameters
```sql
CREATE TABLE public.jc_cracker_parameters (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
cracking_id UUID NOT NULL REFERENCES public.cracking(id) ON DELETE CASCADE,
plate_contact_frequency_hz DOUBLE PRECISION NOT NULL CHECK (plate_contact_frequency_hz > 0),
throughput_rate_pecans_sec DOUBLE PRECISION NOT NULL CHECK (throughput_rate_pecans_sec > 0),
crush_amount_in DOUBLE PRECISION NOT NULL CHECK (crush_amount_in >= 0),
entry_exit_height_diff_in DOUBLE PRECISION NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
```
#### Meyer Cracker Parameters
```sql
CREATE TABLE public.meyer_cracker_parameters (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
cracking_id UUID NOT NULL REFERENCES public.cracking(id) ON DELETE CASCADE,
motor_speed_hz DOUBLE PRECISION NOT NULL CHECK (motor_speed_hz > 0),
jig_displacement_inches DOUBLE PRECISION NOT NULL CHECK (jig_displacement_inches >= 0),
spring_stiffness_nm DOUBLE PRECISION NOT NULL CHECK (spring_stiffness_nm > 0),
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
);
```
### 5. Updated Experiments Table
```sql
CREATE TABLE public.experiments (
id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
experiment_number INTEGER UNIQUE NOT NULL,
reps_required INTEGER NOT NULL CHECK (reps_required > 0),
    weight_per_repetition_lbs DOUBLE PRECISION NOT NULL CHECK (weight_per_repetition_lbs > 0),
results_status TEXT NOT NULL DEFAULT 'valid' CHECK (results_status IN ('valid', 'invalid')),
completion_status BOOLEAN NOT NULL DEFAULT false,
phase_id UUID REFERENCES public.experiment_phases(id) ON DELETE SET NULL,
soaking_id UUID REFERENCES public.soaking(id) ON DELETE SET NULL,
airdrying_id UUID REFERENCES public.airdrying(id) ON DELETE SET NULL,
cracking_id UUID REFERENCES public.cracking(id) ON DELETE SET NULL,
shelling_id UUID REFERENCES public.shelling(id) ON DELETE SET NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
created_by UUID NOT NULL REFERENCES public.user_profiles(id)
);
```
**Changes**:
- Added `weight_per_repetition_lbs` field
- Added foreign key references to phase tables
- Removed phase-specific parameters (moved to phase tables)
## Database Views
### experiments_with_phases
A comprehensive view that joins experiments with all their phase information:
```sql
CREATE VIEW public.experiments_with_phases AS
SELECT
e.*,
ep.name as phase_name,
ep.description as phase_description,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
s.id as soaking_id,
s.scheduled_start_time as soaking_scheduled_start,
s.actual_start_time as soaking_actual_start,
s.soaking_duration_minutes,
s.scheduled_end_time as soaking_scheduled_end,
s.actual_end_time as soaking_actual_end,
ad.id as airdrying_id,
ad.scheduled_start_time as airdrying_scheduled_start,
ad.actual_start_time as airdrying_actual_start,
ad.duration_minutes as airdrying_duration,
ad.scheduled_end_time as airdrying_scheduled_end,
ad.actual_end_time as airdrying_actual_end,
c.id as cracking_id,
c.scheduled_start_time as cracking_scheduled_start,
c.actual_start_time as cracking_actual_start,
c.actual_end_time as cracking_actual_end,
mt.name as machine_type_name,
sh.id as shelling_id,
sh.scheduled_start_time as shelling_scheduled_start,
sh.actual_start_time as shelling_actual_start,
sh.actual_end_time as shelling_actual_end
FROM public.experiments e
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.soaking s ON e.soaking_id = s.id
LEFT JOIN public.airdrying ad ON e.airdrying_id = ad.id
LEFT JOIN public.cracking c ON e.cracking_id = c.id
LEFT JOIN public.machine_types mt ON c.machine_type_id = mt.id
LEFT JOIN public.shelling sh ON e.shelling_id = sh.id;
```
### repetitions_with_phases
A view for repetitions with their phase information:
```sql
CREATE VIEW public.repetitions_with_phases AS
SELECT
er.*,
e.experiment_number,
e.weight_per_repetition_lbs,
ep.name as phase_name,
ep.has_soaking,
ep.has_airdrying,
ep.has_cracking,
ep.has_shelling,
-- ... (similar phase fields as above)
FROM public.experiment_repetitions er
JOIN public.experiments e ON er.experiment_id = e.id
LEFT JOIN public.experiment_phases ep ON e.phase_id = ep.id
LEFT JOIN public.soaking s ON er.id = s.repetition_id
LEFT JOIN public.airdrying ad ON er.id = ad.repetition_id
LEFT JOIN public.cracking c ON er.id = c.repetition_id
LEFT JOIN public.machine_types mt ON c.machine_type_id = mt.id
LEFT JOIN public.shelling sh ON er.id = sh.repetition_id;
```
## TypeScript Interfaces
### New Interfaces Added
```typescript
// Machine Types
export interface MachineType {
id: string
name: string
description?: string | null
created_at: string
updated_at: string
created_by: string
}
// Phase-specific interfaces
export interface Soaking {
id: string
experiment_id: string
repetition_id?: string | null
scheduled_start_time: string
actual_start_time?: string | null
soaking_duration_minutes: number
scheduled_end_time: string
actual_end_time?: string | null
created_at: string
updated_at: string
created_by: string
}
export interface Airdrying {
id: string
experiment_id: string
repetition_id?: string | null
scheduled_start_time: string
actual_start_time?: string | null
duration_minutes: number
scheduled_end_time: string
actual_end_time?: string | null
created_at: string
updated_at: string
created_by: string
}
export interface Cracking {
id: string
experiment_id: string
repetition_id?: string | null
machine_type_id: string
scheduled_start_time: string
actual_start_time?: string | null
actual_end_time?: string | null
created_at: string
updated_at: string
created_by: string
}
export interface Shelling {
id: string
experiment_id: string
repetition_id?: string | null
scheduled_start_time: string
actual_start_time?: string | null
actual_end_time?: string | null
created_at: string
updated_at: string
created_by: string
}
// Machine-specific parameter interfaces
export interface JCCrackerParameters {
id: string
cracking_id: string
plate_contact_frequency_hz: number
throughput_rate_pecans_sec: number
crush_amount_in: number
entry_exit_height_diff_in: number
created_at: string
updated_at: string
}
export interface MeyerCrackerParameters {
id: string
cracking_id: string
motor_speed_hz: number
jig_displacement_inches: number
spring_stiffness_nm: number
created_at: string
updated_at: string
}
```
### Updated Interfaces
```typescript
export interface ExperimentPhase {
id: string
name: string
description?: string | null
has_soaking: boolean
has_airdrying: boolean
has_cracking: boolean
has_shelling: boolean
created_at: string
updated_at: string
created_by: string
}
export interface Experiment {
id: string
experiment_number: number
reps_required: number
weight_per_repetition_lbs: number
results_status: ResultsStatus
completion_status: boolean
phase_id?: string | null
soaking_id?: string | null
airdrying_id?: string | null
cracking_id?: string | null
shelling_id?: string | null
created_at: string
updated_at: string
created_by: string
}
```
## API Management Functions
### New Management Objects
1. **machineTypeManagement**: Functions to manage machine types
2. **phaseManagement**: Functions to manage all phase-specific data
3. **machineParameterManagement**: Functions to manage machine-specific parameters
### Key Functions
```typescript
// Machine Type Management
machineTypeManagement.getAllMachineTypes()
machineTypeManagement.getMachineTypeById(id)
// Phase Management
phaseManagement.createSoaking(request)
phaseManagement.createAirdrying(request)
phaseManagement.createCracking(request)
phaseManagement.createShelling(request)
// Machine Parameter Management
machineParameterManagement.createJCCrackerParameters(request)
machineParameterManagement.createMeyerCrackerParameters(request)
```
## Usage Examples
### Creating an Experiment with Phases
1. **Create Experiment Phase Definition**:
```typescript
const phaseData = {
name: "Full Process Experiment",
description: "Complete pecan processing with all phases",
has_soaking: true,
has_airdrying: true,
has_cracking: true,
has_shelling: true
};
const phase = await experimentPhaseManagement.createExperimentPhase(phaseData);
```
2. **Create Experiment**:
```typescript
const experimentData = {
experiment_number: 1001,
reps_required: 3,
weight_per_repetition_lbs: 50.0,
phase_id: phase.id
};
const experiment = await experimentManagement.createExperiment(experimentData);
```
3. **Create Phase Data** (if phases are enabled):
```typescript
// Soaking
const soakingData = {
experiment_id: experiment.id,
scheduled_start_time: "2025-01-15T08:00:00Z",
soaking_duration_minutes: 120
};
const soaking = await phaseManagement.createSoaking(soakingData);
// Airdrying (scheduled start will be auto-calculated from soaking end)
const airdryingData = {
experiment_id: experiment.id,
duration_minutes: 180
};
const airdrying = await phaseManagement.createAirdrying(airdryingData);
// Cracking
const crackingData = {
experiment_id: experiment.id,
machine_type_id: "jc-cracker-id", // or meyer-cracker-id
// scheduled start will be auto-calculated from airdrying end
};
const cracking = await phaseManagement.createCracking(crackingData);
// Add machine-specific parameters
const jcParams = {
cracking_id: cracking.id,
plate_contact_frequency_hz: 60.0,
throughput_rate_pecans_sec: 2.5,
crush_amount_in: 0.25,
entry_exit_height_diff_in: 0.5
};
await machineParameterManagement.createJCCrackerParameters(jcParams);
```
4. **Update Experiment with Phase References**:
```typescript
await experimentManagement.updateExperiment(experiment.id, {
soaking_id: soaking.id,
airdrying_id: airdrying.id,
cracking_id: cracking.id
});
```
### Creating Repetitions with Phase Data
When creating repetitions, you can attach phase-specific data to each one:
```typescript
// Create repetition
const repetition = await repetitionManagement.createRepetition({
experiment_id: experiment.id,
repetition_number: 1
});
// Create phase data for this specific repetition
const repetitionSoaking = await phaseManagement.createSoaking({
experiment_id: experiment.id,
repetition_id: repetition.id,
scheduled_start_time: "2025-01-15T08:00:00Z",
soaking_duration_minutes: 120
});
```
## Migration Notes
- The old phase-specific parameters in the `experiments` table are preserved for backward compatibility
- New experiments should use the new phase-specific tables
- The existing draft system continues to work with the new schema
- All RLS policies are properly configured for the new tables
## Benefits of New Schema
1. **Flexibility**: Each experiment can have different combinations of phases
2. **Machine Support**: Easy to add new machine types with different parameters
3. **Separation of Concerns**: Phase parameters are stored in dedicated tables
4. **Automatic Calculations**: Scheduled times are automatically calculated based on phase dependencies
5. **Scalability**: Easy to add new phases or modify existing ones
6. **Data Integrity**: Strong constraints ensure data consistency

View File

@@ -1,69 +0,0 @@
# React + TypeScript + Vite
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
Currently, two official plugins are available:
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Babel](https://babeljs.io/) for Fast Refresh
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/) for Fast Refresh
## Expanding the ESLint configuration
If you are developing a production application, we recommend updating the configuration to enable type-aware lint rules:
```js
export default tseslint.config([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Remove tseslint.configs.recommended and replace with this
...tseslint.configs.recommendedTypeChecked,
// Alternatively, use this for stricter rules
...tseslint.configs.strictTypeChecked,
// Optionally, add this for stylistic rules
...tseslint.configs.stylisticTypeChecked,
// Other configs...
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
])
```
You can also install [eslint-plugin-react-x](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-x) and [eslint-plugin-react-dom](https://github.com/Rel1cx/eslint-react/tree/main/packages/plugins/eslint-plugin-react-dom) for React-specific lint rules:
```js
// eslint.config.js
import reactX from 'eslint-plugin-react-x'
import reactDom from 'eslint-plugin-react-dom'
export default tseslint.config([
globalIgnores(['dist']),
{
files: ['**/*.{ts,tsx}'],
extends: [
// Other configs...
// Enable lint rules for React
reactX.configs['recommended-typescript'],
// Enable lint rules for React DOM
reactDom.configs.recommended,
],
languageOptions: {
parserOptions: {
project: ['./tsconfig.node.json', './tsconfig.app.json'],
tsconfigRootDir: import.meta.dirname,
},
// other options...
},
},
])
```

View File

@@ -1,118 +0,0 @@
# Seed Data Corrections
## Problem Identified
The "Schedule Experiment" component was showing "undefined" for the soaking duration in the top-right panel. This was caused by a mismatch between the database schema and the frontend's expectations:
- **Database**: Used `soaking_duration_hours` column
- **Frontend**: Expected `soaking_duration_minutes` field
- **Seed Files**: Were trying to insert into non-existent `soaking_duration_minutes` column
## Root Cause
1. The migration `20250101000004_data_entry_tables.sql` created the `soaking` table with `soaking_duration_hours` column
2. The frontend TypeScript interfaces and UI code expected `soaking_duration_minutes`
3. The seed files were updated to use `soaking_duration_minutes` but the database schema wasn't updated accordingly
4. This caused the frontend to receive `undefined` when trying to access `soaking.soaking_duration_minutes`
## Solution Implemented
### 1. Database Schema Fix
**File**: `supabase/migrations/20250103000001_fix_soaking_duration_column.sql`
- Added `soaking_duration_minutes` INTEGER column
- Backfilled data from `soaking_duration_hours * 60`
- Updated trigger function to use minutes
- Dropped the old `soaking_duration_hours` column
### 2. Views Update
**File**: `supabase/migrations/20250103000002_update_views_for_soaking_minutes.sql`
- Updated `experiments_with_phases` view to use `soaking_duration_minutes`
- Updated `repetitions_with_phases` view to use `soaking_duration_minutes`
### 3. Corrected Seed Files
#### Phase 2 JC Experiments
**File**: `supabase/seed_04_phase2_jc_experiments_corrected.sql`
- **Experiment Numbers**: 0-19 (matching CSV data, not 1-20)
- **Soaking Duration**: Converted from hours to minutes (e.g., 34 hours = 2040 minutes)
- **Air Drying Duration**: Used exact values from CSV (19-60 minutes)
- **JC Cracker Parameters**: All parameters match CSV data exactly
- **Repetitions**: Each experiment has 3 repetitions as specified
#### Meyer Experiments
**File**: `supabase/seed_05_meyer_experiments_corrected.sql`
- **Experiment Numbers**: 1-40 (matching CSV data)
- **Soaking Duration**: Converted from hours to minutes (e.g., 27 hours = 1620 minutes)
- **Air Drying Duration**: Used exact values from CSV (11-56 minutes)
- **Meyer Cracker Parameters**: All parameters match CSV data exactly
- **Repetitions**: Each experiment has 1 repetition as specified
## Data Mapping from CSV
### Phase 2 JC Experiments (phase_2_JC_experimental_run_sheet.csv)
- **Unique Experiments**: 20 (numbers 0-19)
- **Repetitions per Experiment**: 3
- **Soaking Duration Range**: 10-38 hours (600-2280 minutes)
- **Air Drying Duration Range**: 10-60 minutes
- **Machine Type**: JC Cracker
### Meyer Experiments (post_workshop_meyer_experiments.csv)
- **Unique Experiments**: 40 (numbers 1-40)
- **Repetitions per Experiment**: 1
- **Soaking Duration Range**: 10-54 hours (600-3240 minutes)
- **Air Drying Duration Range**: 11-56 minutes
- **Machine Type**: Meyer Cracker
## Files Created/Modified
### New Migration Files
1. `supabase/migrations/20250103000001_fix_soaking_duration_column.sql`
2. `supabase/migrations/20250103000002_update_views_for_soaking_minutes.sql`
### New Seed Files
1. `supabase/seed_04_phase2_jc_experiments_corrected.sql`
2. `supabase/seed_05_meyer_experiments_corrected.sql`
## Deployment Instructions
1. **Run the migrations in order**:
```sql
-- First fix the column
\i supabase/migrations/20250103000001_fix_soaking_duration_column.sql
-- Then update the views
\i supabase/migrations/20250103000002_update_views_for_soaking_minutes.sql
```
2. **Clear existing data and run corrected seeds**:
```sql
-- Clear existing data (optional, use with caution)
DELETE FROM public.experiment_repetitions;
DELETE FROM public.jc_cracker_parameters;
DELETE FROM public.meyer_cracker_parameters;
DELETE FROM public.cracking;
DELETE FROM public.airdrying;
DELETE FROM public.soaking;
DELETE FROM public.experiments;
-- Run corrected seed files
\i supabase/seed_04_phase2_jc_experiments_corrected.sql
\i supabase/seed_05_meyer_experiments_corrected.sql
```
## Expected Result
After applying these changes:
- The "Schedule Experiment" component will show correct soaking durations in minutes
- All experiment data will match the CSV source files exactly
- The database schema will be consistent with frontend expectations
- Phase timing calculations will work correctly
## Verification
To verify the fix:
1. Check that `soaking.soaking_duration_minutes` is populated in the database
2. Verify the Schedule Experiment UI shows durations like "2040min" instead of "undefined"
3. Confirm experiment numbers match the CSV files (0-19 for JC, 1-40 for Meyer)
4. Validate that all machine parameters match the CSV data

View File

@@ -1,137 +0,0 @@
# Vision System Dashboard
This document describes the Vision System dashboard that has been added to the Pecan Experiments application.
## Overview
The Vision System dashboard provides real-time monitoring and control of the USDA Vision Camera System. It displays information about cameras, machines, storage, and recording sessions.
## Features
### System Overview
- **System Status**: Shows if the vision system is online/offline with uptime information
- **MQTT Connection**: Displays MQTT connectivity status and last message timestamp
- **Active Recordings**: Shows current number of active recordings and total recordings
- **Camera/Machine Count**: Quick overview of connected devices
### Camera Monitoring
- **Real-time Status**: Shows connection status for each camera (camera1, camera2)
- **Recording State**: Indicates if cameras are currently recording
- **Device Information**: Displays friendly names and serial numbers
- **Error Reporting**: Shows any camera errors or issues
- **Current Recording Files**: Shows active recording filenames
### Machine Status
- **Machine States**: Monitors vibratory conveyor, blower separator, and other machines
- **MQTT Topics**: Shows the MQTT topics for each machine
- **Last Updated**: Timestamps for when each machine status was last updated
- **State Colors**: Visual indicators for machine states (on/off/running/stopped)
### Storage Management
- **Disk Usage**: Visual progress bar showing total disk usage
- **File Statistics**: Total files, total size, and free space
- **Per-Camera Breakdown**: Storage usage statistics for each camera
- **Storage Path**: Shows the base storage directory
### Recording Sessions
- **Recent Recordings Table**: Shows the latest recording sessions
- **Recording Details**: Filename, camera, status, duration, file size, start time
- **Status Indicators**: Visual status badges for completed/active/failed recordings
## API Integration
The dashboard connects to the Vision System API running on `http://localhost:8000` and uses the following endpoints:
### Endpoints Used
- `GET /system/status` - System overview and status
- `GET /cameras` - Camera status and information
- `GET /machines` - Machine status and MQTT data
- `GET /storage/stats` - Storage usage statistics
- `GET /recordings` - Recording session information
### Auto-Refresh
- The dashboard automatically refreshes data every 5 seconds
- Manual refresh button available for immediate updates
- Loading indicators show when data is being fetched
## Access Control
The Vision System dashboard is accessible to all authenticated users regardless of role:
- **Admin**: Full access to all features
- **Conductor**: Full access to all features
- **Analyst**: Full access to all features
- **Data Recorder**: Full access to all features
## Error Handling
The dashboard includes comprehensive error handling:
- **Connection Errors**: Shows user-friendly messages when the vision system is unavailable
- **API Errors**: Displays specific error messages from the vision system API
- **Retry Functionality**: "Try Again" button to retry failed requests
- **Graceful Degradation**: Shows partial data if some API calls fail
## Technical Implementation
### Files Added/Modified
- `src/lib/visionApi.ts` - API client for vision system integration
- `src/components/VisionSystem.tsx` - Main dashboard component
- `src/components/DashboardLayout.tsx` - Added routing for vision system
- `src/components/Sidebar.tsx` - Added menu item for vision system
- `src/components/TopNavbar.tsx` - Added page title for vision system
### Dependencies
- Uses existing React/TypeScript setup
- Leverages Tailwind CSS for styling
- No additional dependencies required
### API Client Features
- TypeScript interfaces for all API responses
- Comprehensive error handling
- Utility functions for formatting (bytes, duration, uptime)
- Singleton pattern for API client instance
## Usage
1. **Navigate to Vision System**: Click "Vision System" in the sidebar menu
2. **Monitor Status**: View real-time system, camera, and machine status
3. **Check Storage**: Monitor disk usage and file statistics
4. **Review Recordings**: See recent recording sessions and their details
5. **Refresh Data**: Use the refresh button or wait for auto-refresh
## Troubleshooting
### Common Issues
1. **"Failed to fetch vision system data"**
- Ensure the vision system API is running on localhost:8000
- Check network connectivity
- Verify the vision system service is started
2. **Empty Dashboard**
- Vision system may not have any cameras or machines configured
- Check vision system configuration
- Verify MQTT connectivity
3. **Outdated Information**
- Data refreshes every 5 seconds automatically
- Use the manual refresh button for immediate updates
- Check if vision system is responding to API calls
### API Base URL Configuration
The API base URL is configured in `src/lib/visionApi.ts`:
```typescript
const VISION_API_BASE_URL = 'http://localhost:8000'
```
To change the API endpoint, modify this constant and rebuild the application.
## Future Enhancements
Potential improvements for the vision system dashboard:
- **Camera Controls**: Add start/stop recording buttons
- **Camera Recovery**: Add diagnostic and recovery action buttons
- **File Management**: Add file browsing and cleanup functionality
- **Real-time Streaming**: Add live camera feed display
- **Alerts**: Add notifications for system issues
- **Historical Data**: Add charts and trends for system metrics

View File

@@ -1,162 +0,0 @@
# 🤖 Auto-Recording Setup Guide
This guide explains how to set up and test the automatic recording functionality that triggers camera recording when machines turn on/off via MQTT.
## 📋 Overview
The auto-recording feature allows cameras to automatically start recording when their associated machine turns on and stop recording when the machine turns off. This is based on MQTT messages received from the machines.
## 🔧 Setup Steps
### 1. Configure Camera Auto-Recording
1. **Access Vision System**: Navigate to the Vision System page in the dashboard
2. **Open Camera Configuration**: Click "Configure Camera" on any camera (admin access required)
3. **Enable Auto-Recording**: In the "Auto-Recording" section, check the box "Automatically start recording when machine turns on"
4. **Save Configuration**: Click "Save Changes" to apply the setting
### 2. Machine-Camera Mapping
The system uses the `machine_topic` field in camera configuration to determine which MQTT topic to monitor:
- **Camera 1** (`camera1`) → monitors `blower_separator`
- **Camera 2** (`camera2`) → monitors `vibratory_conveyor`
### 3. Start Auto-Recording Manager
1. **Navigate to Vision System**: Go to the Vision System page
2. **Find Auto-Recording Section**: Look for the "Auto-Recording" panel (admin only)
3. **Start Monitoring**: Click the "Start" button to begin monitoring MQTT events
4. **Monitor Status**: The panel will show the current state of all cameras and their auto-recording status
## 🧪 Testing the Functionality
### Test Scenario 1: Manual MQTT Message Simulation
If you have access to the MQTT broker, you can test by sending messages:
```bash
# Turn on the vibratory conveyor (should start recording on camera2)
mosquitto_pub -h 192.168.1.110 -t "vision/vibratory_conveyor/state" -m "on"
# Turn off the vibratory conveyor (should stop recording on camera2)
mosquitto_pub -h 192.168.1.110 -t "vision/vibratory_conveyor/state" -m "off"
# Turn on the blower separator (should start recording on camera1)
mosquitto_pub -h 192.168.1.110 -t "vision/blower_separator/state" -m "on"
# Turn off the blower separator (should stop recording on camera1)
mosquitto_pub -h 192.168.1.110 -t "vision/blower_separator/state" -m "off"
```
### Test Scenario 2: Physical Machine Operation
1. **Enable Auto-Recording**: Ensure auto-recording is enabled for the desired cameras
2. **Start Auto-Recording Manager**: Make sure the auto-recording manager is running
3. **Operate Machine**: Turn on the physical machine (conveyor or blower)
4. **Verify Recording**: Check that the camera starts recording automatically
5. **Stop Machine**: Turn off the machine
6. **Verify Stop**: Check that recording stops automatically
## 📊 Monitoring and Verification
### Auto-Recording Status Panel
The Vision System page includes an "Auto-Recording" status panel that shows:
- **Manager Status**: Whether the auto-recording manager is active
- **Camera States**: For each camera:
- Machine state (ON/OFF)
- Recording status (YES/NO)
- Auto-record enabled status
- Last state change timestamp
### MQTT Events Panel
Monitor the MQTT Events section to see:
- Recent machine state changes
- MQTT message timestamps
- Message payloads
### Recording Files
Check the storage section for automatically created recording files:
- Files will be named with pattern: `auto_{machine_name}_{timestamp}.avi`
- Example: `auto_vibratory_conveyor_2025-07-29T10-30-45-123Z.avi`
## 🔍 Troubleshooting
### Auto-Recording Not Starting
1. **Check Configuration**: Verify auto-recording is enabled in camera config
2. **Check Manager Status**: Ensure auto-recording manager is running
3. **Check MQTT Connection**: Verify MQTT client is connected
4. **Check Machine Topic**: Ensure camera's machine_topic matches MQTT topic
5. **Check Permissions**: Ensure you have admin access
### Recording Not Stopping
1. **Check MQTT Messages**: Verify "off" messages are being received
2. **Check Manager Logs**: Look for error messages in browser console
3. **Manual Stop**: Use manual stop recording if needed
### Performance Issues
1. **Polling Interval**: The manager polls MQTT events every 2 seconds by default
2. **Event Processing**: Only new events since last poll are processed
3. **Error Handling**: Failed operations are logged but don't stop the manager
## 🔧 Configuration Options
### Camera Configuration Fields
```json
{
"auto_record_on_machine_start": true, // Enable/disable auto-recording
"machine_topic": "vibratory_conveyor", // MQTT topic to monitor
// ... other camera settings
}
```
### Auto-Recording Manager Settings
- **Polling Interval**: 2000ms (configurable in code)
- **Event Batch Size**: 50 events per poll
- **Filename Pattern**: `auto_{machine_name}_{timestamp}.avi`
## 📝 API Endpoints
### Camera Configuration
- `GET /cameras/{camera_name}/config` - Get camera configuration
- `PUT /cameras/{camera_name}/config` - Update camera configuration
### Recording Control
- `POST /cameras/{camera_name}/start-recording` - Start recording
- `POST /cameras/{camera_name}/stop-recording` - Stop recording
### MQTT Monitoring
- `GET /mqtt/events?limit=50` - Get recent MQTT events
- `GET /machines` - Get machine states
## 🚨 Important Notes
1. **Admin Access Required**: Auto-recording configuration requires admin privileges
2. **Backend Integration**: This frontend implementation requires corresponding backend support
3. **MQTT Dependency**: Functionality depends on stable MQTT connection
4. **Storage Space**: Monitor storage usage as auto-recording can generate many files
5. **Network Reliability**: Ensure stable network connection for MQTT messages
## 🔄 Future Enhancements
Potential improvements for the auto-recording system:
1. **Recording Schedules**: Time-based recording rules
2. **Storage Management**: Automatic cleanup of old recordings
3. **Alert System**: Notifications for recording failures
4. **Advanced Triggers**: Multiple machine dependencies
5. **Recording Profiles**: Different settings per machine state

View File

@@ -1,300 +0,0 @@
# 🏗️ Modular Architecture Guide
This guide demonstrates the modular architecture patterns implemented in the video streaming feature and how to apply them to other parts of the project.
## 🎯 Goals
- **Separation of Concerns**: Each module has a single responsibility
- **Reusability**: Components can be used across different parts of the application
- **Maintainability**: Easy to understand, modify, and test individual pieces
- **Scalability**: Easy to add new features without affecting existing code
## 📁 Feature-Based Structure
```
src/features/video-streaming/
├── components/ # UI Components
│ ├── VideoPlayer.tsx
│ ├── VideoCard.tsx
│ ├── VideoList.tsx
│ ├── VideoModal.tsx
│ ├── VideoThumbnail.tsx
│ └── index.ts
├── hooks/ # Custom React Hooks
│ ├── useVideoList.ts
│ ├── useVideoPlayer.ts
│ ├── useVideoInfo.ts
│ └── index.ts
├── services/ # API & Business Logic
│ └── videoApi.ts
├── types/ # TypeScript Definitions
│ └── index.ts
├── utils/ # Pure Utility Functions
│ └── videoUtils.ts
├── VideoStreamingPage.tsx # Main Feature Page
└── index.ts # Feature Export
```
## 🧩 Layer Responsibilities
### 1. **Components Layer** (`/components`)
- **Purpose**: Pure UI components that handle rendering and user interactions
- **Rules**:
- No direct API calls
- Receive data via props
- Emit events via callbacks
- Minimal business logic
**Example:**
```tsx
// ✅ Good: Pure component with clear props
export const VideoCard: React.FC<VideoCardProps> = ({
video,
onClick,
showMetadata = true,
}) => {
return (
<div onClick={() => onClick?.(video)}>
{/* UI rendering */}
</div>
);
};
// ❌ Bad: Component with API calls
export const VideoCard = () => {
const [video, setVideo] = useState(null);
useEffect(() => {
fetch('/api/videos/123').then(/* ... */); // Don't do this!
}, []);
};
```
### 2. **Hooks Layer** (`/hooks`)
- **Purpose**: Manage state, side effects, and provide data to components
- **Rules**:
- Handle API calls and data fetching
- Manage component state
- Provide clean interfaces to components
**Example:**
```tsx
// ✅ Good: Hook handles complexity, provides simple interface
export function useVideoList(options = {}) {
const [videos, setVideos] = useState([]);
const [loading, setLoading] = useState(false);
const fetchVideos = useCallback(async () => {
setLoading(true);
try {
const data = await videoApiService.getVideos();
setVideos(data.videos);
} finally {
setLoading(false);
}
}, []);
return { videos, loading, refetch: fetchVideos };
}
```
### 3. **Services Layer** (`/services`)
- **Purpose**: Handle external dependencies (APIs, storage, etc.)
- **Rules**:
- Pure functions or classes
- No React dependencies
- Handle errors gracefully
- Provide consistent interfaces
**Example:**
```tsx
// ✅ Good: Service handles API complexity
export class VideoApiService {
async getVideos(params = {}) {
try {
const response = await fetch(this.buildUrl('/videos', params));
return await this.handleResponse(response);
} catch (error) {
      throw new VideoApiError('FETCH_ERROR', error instanceof Error ? error.message : String(error));
}
}
}
```
### 4. **Types Layer** (`/types`)
- **Purpose**: Centralized TypeScript definitions
- **Rules**:
- Define the feature's shared interfaces and types here
- Export from index.ts
- Co-locate narrowly used types with the code that uses them
### 5. **Utils Layer** (`/utils`)
- **Purpose**: Pure utility functions
- **Rules**:
- No side effects
- Easily testable
- Single responsibility
## 🔄 Component Composition Patterns
### Small, Focused Components
Instead of large monolithic components, create small, focused ones:
```tsx
// ✅ Good: Small, focused components
<VideoList>
{videos.map(video => (
<VideoCard key={video.id} video={video} onClick={onVideoSelect} />
))}
</VideoList>
// ❌ Bad: Monolithic component
<VideoSystemPage>
{/* 500+ lines of mixed concerns */}
</VideoSystemPage>
```
### Composition over Inheritance
```tsx
// ✅ Good: Compose features
export const VideoStreamingPage = () => {
const { videos, loading } = useVideoList();
const [selectedVideo, setSelectedVideo] = useState(null);
return (
<div>
<VideoList videos={videos} onVideoSelect={setSelectedVideo} />
<VideoModal video={selectedVideo} />
</div>
);
};
```
## 🎨 Applying to Existing Components
### Example: Breaking Down VisionSystem Component
**Current Structure (Monolithic):**
```tsx
// ❌ Current: One large component
export const VisionSystem = () => {
// 900+ lines of mixed concerns
return (
<div>
{/* System status */}
{/* Camera cards */}
{/* Storage info */}
{/* MQTT status */}
</div>
);
};
```
**Proposed Modular Structure:**
```
src/features/vision-system/
├── components/
│ ├── SystemStatusCard.tsx
│ ├── CameraCard.tsx
│ ├── CameraGrid.tsx
│ ├── StorageOverview.tsx
│ ├── MqttStatus.tsx
│ └── index.ts
├── hooks/
│ ├── useSystemStatus.ts
│ ├── useCameraList.ts
│ └── index.ts
├── services/
│ └── visionApi.ts
└── VisionSystemPage.tsx
```
**Refactored Usage:**
```tsx
// ✅ Better: Composed from smaller parts
export const VisionSystemPage = () => {
return (
<div>
<SystemStatusCard />
<CameraGrid />
<StorageOverview />
<MqttStatus />
</div>
);
};
// Now you can reuse components elsewhere:
export const DashboardHome = () => {
return (
<div>
<SystemStatusCard /> {/* Reused! */}
<QuickStats />
</div>
);
};
```
## 📋 Migration Strategy
### Phase 1: Extract Utilities
1. Move pure functions to `/utils`
2. Move types to `/types`
3. Create service classes for API calls
### Phase 2: Extract Hooks
1. Create custom hooks for data fetching
2. Move state management to hooks
3. Simplify component logic
### Phase 3: Break Down Components
1. Identify distinct UI sections
2. Extract to separate components
3. Use composition in parent components
### Phase 4: Feature Organization
1. Group related components, hooks, and services
2. Create feature-level exports
3. Update imports across the application
## 🧪 Testing Benefits
Modular architecture makes testing much easier:
```tsx
// ✅ Easy to test individual pieces
describe('VideoCard', () => {
it('displays video information', () => {
render(<VideoCard video={mockVideo} />);
expect(screen.getByText(mockVideo.filename)).toBeInTheDocument();
});
});
describe('useVideoList', () => {
it('fetches videos on mount', async () => {
const { result } = renderHook(() => useVideoList());
await waitFor(() => {
expect(result.current.videos).toHaveLength(3);
});
});
});
```
## 🚀 Benefits Achieved
1. **Reusability**: `VideoCard` can be used in lists, grids, or modals
2. **Maintainability**: Each file has a single, clear purpose
3. **Testability**: Small, focused units are easy to test
4. **Developer Experience**: Clear structure makes onboarding easier
5. **Performance**: Smaller components enable better optimization
## 📝 Best Practices
1. **Start Small**: Begin with one feature and apply patterns gradually
2. **Single Responsibility**: Each file should have one clear purpose
3. **Clear Interfaces**: Use TypeScript to define clear contracts
4. **Consistent Naming**: Follow naming conventions across features
5. **Documentation**: Document complex logic and interfaces
This modular approach transforms large, hard-to-maintain components into small, reusable, and testable pieces that can be composed together to create powerful features.

View File

@@ -1,145 +0,0 @@
# 🎥 MP4 Frontend Implementation Status
## ✅ Implementation Complete & API-Aligned
The frontend has been successfully updated to match the actual camera configuration API structure with full MP4 format support and proper field categorization.
## 🔧 Changes Made
### 1. **TypeScript Types Updated** (`src/lib/visionApi.ts`)
- **Complete API alignment** with actual camera configuration structure
- **Required video format fields**: `video_format`, `video_codec`, `video_quality`
- **Added missing fields**: `wb_red_gain`, `wb_green_gain`, `wb_blue_gain`
- **Proper field categorization**: Read-only vs real-time configurable vs restart-required
### 2. **Video File Utilities Created** (`src/utils/videoFileUtils.ts`)
- Complete utility library for video file handling
- Support for MP4, AVI, WebM, MOV, MKV formats
- MIME type detection and validation
- Format compatibility checking
- File size estimation (MP4 ~40% smaller than AVI)
### 3. **Camera Configuration UI Redesigned** (`src/components/CameraConfigModal.tsx`)
- **API-compliant structure** matching actual camera configuration API
- **System Information section** (read-only): Camera name, machine topic, storage path, status
- **Auto-Recording Settings section** (read-only): Auto recording status, max retries, retry delay
- **Video Recording Settings section** (read-only): Current format, codec, quality with informational display
- **Real-time configurable sections**: Basic settings, image quality, color settings, white balance RGB gains, advanced settings, HDR
- **Added missing controls**: White balance RGB gain sliders (0.00-3.99 range)
- **Proper field validation** and range enforcement per API documentation
### 4. **Video Player Components Improved**
- **VideoPlayer**: Dynamic MIME type detection, iOS compatibility (`playsInline`)
- **VideoModal**: Format indicators with web compatibility badges
- **VideoUtils**: Enhanced format detection and utilities
## 🚨 Current API Compatibility Issue
### Problem
The backend API is returning a validation error:
```
3 validation errors for CameraConfigResponse
video_format: Field required
video_codec: Field required
video_quality: Field required
```
### Root Cause
The backend expects the new video format fields to be required, but existing camera configurations don't have these fields yet.
### Frontend Solution ✅
The frontend now handles this gracefully:
1. **Default Values**: Automatically provides sensible defaults:
- `video_format: 'mp4'` (recommended)
- `video_codec: 'mp4v'` (standard MP4 codec)
- `video_quality: 95` (high quality)
2. **Error Handling**: Shows helpful error message when API fails
3. **Fallback Configuration**: Creates a working default configuration
4. **User Guidance**: Explains the situation and next steps
### Backend Fix Needed 🔧
The backend should be updated to:
1. Make video format fields optional in the API response
2. Provide default values when fields are missing
3. Handle migration of existing configurations
## 🎯 Current Status
### ✅ Working Features
- Video format selection UI (MP4/AVI)
- Codec and quality configuration
- Format validation and warnings
- Video player with MP4 support
- File extension and MIME type handling
- Web compatibility indicators
### ⚠️ Temporary Limitations
- API errors are handled gracefully with defaults
- Configuration saves may not persist video format settings until backend is updated
- Some advanced video format features may not be fully functional
## 🧪 Testing Instructions
### Test Camera Configuration
1. Open Vision System page
2. Click "Configure" on any camera
3. Scroll to "Video Recording Settings" section
4. Verify format/codec/quality controls work
5. Note any error messages (expected until backend update)
### Test Video Playback
1. Verify existing AVI videos still play
2. Test any new MP4 videos (if available)
3. Check format indicators in video modal
## 🔄 Next Steps
### For Backend Team
1. Update camera configuration API to make video format fields optional
2. Provide default values for missing fields
3. Implement video format persistence in database
4. Test API with updated frontend
### For Frontend Team
1. Test thoroughly once backend is updated
2. Remove temporary error handling once API is fixed
3. Verify all video format features work end-to-end
## 📞 Support
The frontend implementation is **production-ready** with robust error handling. Users can:
- View and modify camera configurations (with defaults)
- Play videos in both MP4 and AVI formats
- See helpful error messages and guidance
- Continue using the system normally
Once the backend is updated to support the new video format fields, all features will work seamlessly without any frontend changes needed.
## 🎉 Benefits Ready to Unlock
Once backend is updated:
- **40% smaller file sizes** with MP4 format
- **Better web compatibility** and mobile support
- **Improved streaming performance**
- **Professional video quality** maintained
- **Seamless format migration** for existing recordings

View File

@@ -1,351 +0,0 @@
# 🎬 Video Streaming Integration Guide
This guide shows how to integrate the modular video streaming feature into your existing dashboard.
## 🚀 Quick Start
### 1. Add to Dashboard Navigation
Update your sidebar or navigation to include the video streaming page:
```tsx
// In src/components/Sidebar.tsx or similar
import { VideoStreamingPage } from '../features/video-streaming';
const navigationItems = [
// ... existing items
{
name: 'Video Library',
href: '/videos',
icon: VideoCameraIcon,
component: VideoStreamingPage,
},
];
```
### 2. Add Route (if using React Router)
```tsx
// In your main App.tsx or router configuration
import { VideoStreamingPage } from './features/video-streaming';
function App() {
return (
<Routes>
{/* ... existing routes */}
<Route path="/videos" element={<VideoStreamingPage />} />
</Routes>
);
}
```
## 🧩 Using Individual Components
The beauty of the modular architecture is that you can use individual components anywhere:
### Dashboard Home - Recent Videos
```tsx
// In src/components/DashboardHome.tsx
import { VideoList } from '../features/video-streaming';
export const DashboardHome = () => {
return (
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
{/* Existing dashboard content */}
<div className="bg-white rounded-lg shadow p-6">
<h2 className="text-lg font-semibold mb-4">Recent Videos</h2>
<VideoList
limit={6}
filters={{ /* recent videos only */ }}
className="grid grid-cols-2 gap-4"
/>
</div>
</div>
);
};
```
### Vision System - Camera Videos
```tsx
// In src/components/VisionSystem.tsx
import { VideoList, VideoCard } from '../features/video-streaming';
export const VisionSystem = () => {
const [selectedCamera, setSelectedCamera] = useState(null);
return (
<div>
{/* Existing vision system content */}
{/* Add video section for selected camera */}
{selectedCamera && (
<div className="mt-8">
<h3 className="text-lg font-semibold mb-4">
Recent Videos - {selectedCamera}
</h3>
<VideoList
filters={{ cameraName: selectedCamera }}
limit={8}
/>
</div>
)}
</div>
);
};
```
### Experiment Data Entry - Video Evidence
```tsx
// In src/components/DataEntry.tsx
import { VideoThumbnail, VideoModal } from '../features/video-streaming';
export const DataEntry = () => {
const [selectedVideo, setSelectedVideo] = useState(null);
const [showVideoModal, setShowVideoModal] = useState(false);
return (
<form>
{/* Existing form fields */}
{/* Add video evidence section */}
<div className="mb-6">
<label className="block text-sm font-medium text-gray-700 mb-2">
Video Evidence
</label>
<div className="grid grid-cols-4 gap-4">
{experimentVideos.map(video => (
<VideoThumbnail
key={video.file_id}
fileId={video.file_id}
onClick={() => {
setSelectedVideo(video);
setShowVideoModal(true);
}}
/>
))}
</div>
</div>
<VideoModal
video={selectedVideo}
isOpen={showVideoModal}
onClose={() => setShowVideoModal(false)}
/>
</form>
);
};
```
## 🎨 Customizing Components
### Custom Video Card for Experiments
```tsx
// Create a specialized version for your use case
import { VideoCard } from '../features/video-streaming';
export const ExperimentVideoCard = ({ video, experimentId, onAttach }) => {
return (
<div className="relative">
<VideoCard video={video} showMetadata={false} />
{/* Add experiment-specific actions */}
<div className="absolute top-2 right-2">
<button
onClick={() => onAttach(video.file_id, experimentId)}
className="bg-blue-500 text-white px-2 py-1 rounded text-xs"
>
Attach to Experiment
</button>
</div>
</div>
);
};
```
### Custom Video Player with Annotations
```tsx
// Extend the base video player
import { VideoPlayer } from '../features/video-streaming';
export const AnnotatedVideoPlayer = ({ fileId, annotations }) => {
return (
<div className="relative">
<VideoPlayer fileId={fileId} />
{/* Add annotation overlay */}
<div className="absolute inset-0 pointer-events-none">
{annotations.map(annotation => (
<div
key={annotation.id}
className="absolute bg-yellow-400 bg-opacity-75 p-2 rounded"
style={{
left: `${annotation.x}%`,
top: `${annotation.y}%`,
}}
>
{annotation.text}
</div>
))}
</div>
</div>
);
};
```
## 🔧 Configuration
### API Base URL
Update the API base URL if needed:
```tsx
// In your app configuration
import { VideoApiService } from './features/video-streaming';
// Create a configured instance
export const videoApi = new VideoApiService('http://your-api-server:8000');
// Or set globally via a Vite environment variable in your .env file:
// VITE_VIDEO_API_URL=http://your-api-server:8000
```
### Custom Styling
The components use Tailwind CSS classes. You can customize them:
```tsx
// Override default styles
<VideoList
className="grid grid-cols-1 md:grid-cols-3 gap-8" // Custom grid
/>
<VideoCard
className="border-2 border-blue-200 hover:border-blue-400" // Custom border
/>
```
## 🎯 Integration Examples
### 1. Camera Management Integration
```tsx
// In your camera management page
import { VideoList, useVideoList } from '../features/video-streaming';
export const CameraManagement = () => {
const [selectedCamera, setSelectedCamera] = useState(null);
const { videos } = useVideoList({
initialParams: { camera_name: selectedCamera?.name }
});
return (
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
{/* Camera controls */}
<CameraControls onCameraSelect={setSelectedCamera} />
{/* Videos from selected camera */}
<div>
<h3>Videos from {selectedCamera?.name}</h3>
<VideoList
filters={{ cameraName: selectedCamera?.name }}
limit={12}
/>
</div>
</div>
);
};
```
### 2. Experiment Timeline Integration
```tsx
// Show videos in experiment timeline
import { VideoThumbnail } from '../features/video-streaming';
export const ExperimentTimeline = ({ experiment }) => {
return (
<div className="timeline">
{experiment.events.map(event => (
<div key={event.id} className="timeline-item">
<div className="timeline-content">
<h4>{event.title}</h4>
<p>{event.description}</p>
{/* Show related videos */}
{event.videos?.length > 0 && (
<div className="flex space-x-2 mt-2">
{event.videos.map(videoId => (
<VideoThumbnail
key={videoId}
fileId={videoId}
width={120}
height={80}
/>
))}
</div>
)}
</div>
</div>
))}
</div>
);
};
```
## 📱 Responsive Design
The components are designed to be responsive:
```tsx
// Automatic responsive grid
<VideoList className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 gap-4" />
// Mobile-friendly video player
<VideoPlayer
fileId={video.file_id}
className="w-full h-auto max-h-96"
/>
```
## 🔍 Search Integration
Add search functionality:
```tsx
import { useVideoList } from '../features/video-streaming';
export const VideoSearch = () => {
const [searchTerm, setSearchTerm] = useState('');
const { videos, loading } = useVideoList({
initialParams: { search: searchTerm }
});
return (
<div>
<input
type="text"
value={searchTerm}
onChange={(e) => setSearchTerm(e.target.value)}
placeholder="Search videos..."
className="w-full px-4 py-2 border rounded-lg"
/>
<VideoList videos={videos} loading={loading} />
</div>
);
};
```
## 🚀 Next Steps
1. **Start Small**: Begin by adding the video library page
2. **Integrate Gradually**: Add individual components to existing pages
3. **Customize**: Create specialized versions for your specific needs
4. **Extend**: Add new features like annotations, bookmarks, or sharing
The modular architecture makes it easy to start simple and grow the functionality over time!

View File

@@ -1,175 +0,0 @@
# Video Streaming Integration - Complete Implementation
This document provides a comprehensive overview of the completed video streaming integration with the USDA Vision Camera System.
## 🎯 Overview
The video streaming functionality has been successfully integrated into the Pecan Experiments React application, providing a complete video browsing and playback interface that connects to the USDA Vision Camera System API.
## ✅ Completed Features
### 1. Core Video Streaming Components
- **VideoList**: Displays filterable list of videos with pagination
- **VideoPlayer**: HTML5 video player with custom controls and range request support
- **VideoCard**: Individual video cards with thumbnails and metadata
- **VideoThumbnail**: Thumbnail component with caching and error handling
- **VideoModal**: Modal for video playback
- **Pagination**: Pagination controls for large video collections
### 2. API Integration
- **VideoApiService**: Complete API client for USDA Vision Camera System
- **Flexible Configuration**: Environment-based API URL configuration
- **Error Handling**: Comprehensive error handling with user-friendly messages
- **Performance Monitoring**: Built-in performance tracking and metrics
### 3. Performance Optimizations
- **Thumbnail Caching**: Intelligent caching system with LRU eviction
- **Performance Monitoring**: Real-time performance metrics and reporting
- **Efficient Loading**: Optimized API calls and data fetching
- **Memory Management**: Automatic cleanup and memory optimization
### 4. Error Handling & User Experience
- **Error Boundaries**: React error boundaries for graceful error handling
- **API Status Indicator**: Real-time API connectivity status
- **Loading States**: Comprehensive loading indicators
- **User Feedback**: Clear error messages and recovery options
### 5. Development Tools
- **Performance Dashboard**: Development-only performance monitoring UI
- **Debug Information**: Detailed error information in development mode
- **Cache Statistics**: Real-time cache performance metrics
## 🔧 Configuration
### Environment Variables
Create a `.env` file with the following configuration:
```bash
# USDA Vision Camera System API Configuration
# Default: http://localhost:8000 (used both for the Docker container
# and for local development without Docker)
# For remote systems: http://192.168.1.100:8000
VITE_VISION_API_URL=http://localhost:8000
```
### API Endpoints Used
- `GET /videos/` - List videos with filtering and pagination
- `GET /videos/{file_id}` - Get detailed video information
- `GET /videos/{file_id}/stream` - Stream video content with range requests
- `GET /videos/{file_id}/thumbnail` - Generate video thumbnails
- `GET /videos/{file_id}/info` - Get streaming technical details
- `POST /videos/{file_id}/validate` - Validate video accessibility
## 🚀 Usage
### Navigation
The video streaming functionality is accessible through:
- **Main Navigation**: "Video Library" menu item
- **Vision System**: Integrated with existing vision system dashboard
### Features Available
1. **Browse Videos**: Filter by camera, date range, and sort options
2. **View Thumbnails**: Automatic thumbnail generation with caching
3. **Play Videos**: Full-featured video player with seeking capabilities
4. **Performance Monitoring**: Real-time performance metrics (development mode)
### User Interface
- **Responsive Design**: Works on desktop and mobile devices
- **Dark/Light Theme**: Follows application theme preferences
- **Accessibility**: Keyboard navigation and screen reader support
## 🔍 Technical Implementation
### Architecture
```
src/features/video-streaming/
├── components/ # React components
│ ├── VideoList.tsx # Video listing with filters
│ ├── VideoPlayer.tsx # Video playback component
│ ├── VideoCard.tsx # Individual video cards
│ ├── VideoThumbnail.tsx # Thumbnail component
│ ├── VideoModal.tsx # Video playback modal
│ ├── ApiStatusIndicator.tsx # API status display
│ ├── VideoErrorBoundary.tsx # Error handling
│ └── PerformanceDashboard.tsx # Dev tools
├── hooks/ # Custom React hooks
│ ├── useVideoList.ts # Video list management
│ ├── useVideoPlayer.ts # Video player state
│ └── useVideoInfo.ts # Video information
├── services/ # API services
│ └── videoApi.ts # USDA Vision API client
├── utils/ # Utilities
│ ├── videoUtils.ts # Video helper functions
│ ├── thumbnailCache.ts # Thumbnail caching
│ └── performanceMonitor.ts # Performance tracking
├── types/ # TypeScript types
└── VideoStreamingPage.tsx # Main page component
```
### Key Technologies
- **React 18**: Modern React with hooks and concurrent features
- **TypeScript**: Full type safety and IntelliSense
- **Tailwind CSS**: Utility-first styling
- **HTML5 Video**: Native video playback with custom controls
- **Fetch API**: Modern HTTP client for API calls
## 📊 Performance Features
### Thumbnail Caching
- **LRU Cache**: Least Recently Used eviction policy
- **Memory Management**: Configurable memory limits
- **Automatic Cleanup**: Expired entry removal
- **Statistics**: Real-time cache performance metrics
### Performance Monitoring
- **Operation Tracking**: Automatic timing of API calls
- **Success Rates**: Track success/failure rates
- **Memory Usage**: Monitor cache memory consumption
- **Development Dashboard**: Visual performance metrics
### Optimizations
- **Range Requests**: Efficient video seeking with HTTP range requests
- **Lazy Loading**: Thumbnails loaded on demand
- **Error Recovery**: Automatic retry mechanisms
- **Connection Pooling**: Efficient HTTP connection reuse
## 🛠️ Development
### Testing the Integration
1. **Start the Application**: `npm run dev`
2. **Navigate to Video Library**: Use the sidebar navigation
3. **Check API Status**: Look for the connection indicator
4. **Browse Videos**: Filter and sort available videos
5. **Play Videos**: Click on video cards to open the player
### Development Tools
- **Performance Dashboard**: Click the performance icon (bottom-right)
- **Browser DevTools**: Check console for performance logs
- **Network Tab**: Monitor API calls and response times
### Troubleshooting
1. **API Connection Issues**: Check VITE_VISION_API_URL environment variable
2. **Video Not Playing**: Verify video file accessibility and format
3. **Thumbnail Errors**: Check thumbnail generation API endpoint
4. **Performance Issues**: Use the performance dashboard to identify bottlenecks
## 🔮 Future Enhancements
### Potential Improvements
- **Video Upload**: Add video upload functionality
- **Live Streaming**: Integrate live camera feeds
- **Video Analytics**: Add video analysis and metadata extraction
- **Offline Support**: Cache videos for offline viewing
- **Advanced Filters**: More sophisticated filtering options
### Integration Opportunities
- **Experiment Data**: Link videos to experiment data
- **Machine Learning**: Integrate with video analysis models
- **Export Features**: Video export and sharing capabilities
- **Collaboration**: Multi-user video annotation and comments
## 📝 Conclusion
The video streaming integration provides a robust, performant, and user-friendly interface for browsing and viewing videos from the USDA Vision Camera System. The implementation includes comprehensive error handling, performance optimizations, and development tools to ensure a smooth user experience and maintainable codebase.
The modular architecture allows for easy extension and customization, while the performance monitoring and caching systems ensure optimal performance even with large video collections.

View File

@@ -4,7 +4,7 @@
<meta charset="UTF-8" /> <meta charset="UTF-8" />
<link rel="icon" type="image/svg+xml" href="/vite.svg" /> <link rel="icon" type="image/svg+xml" href="/vite.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>Vite + React + TS</title> <title>Experiments Dashboard</title>
</head> </head>
<body> <body>
<div id="root"></div> <div id="root"></div>

View File

@@ -22,6 +22,7 @@
}, },
"devDependencies": { "devDependencies": {
"@eslint/js": "^9.30.1", "@eslint/js": "^9.30.1",
"@originjs/vite-plugin-federation": "^1.3.3",
"@types/react": "^19.1.8", "@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6", "@types/react-dom": "^19.1.6",
"@types/react-router-dom": "^5.3.3", "@types/react-router-dom": "^5.3.3",
@@ -1052,6 +1053,34 @@
"node": ">= 8" "node": ">= 8"
} }
}, },
"node_modules/@originjs/vite-plugin-federation": {
"version": "1.4.1",
"resolved": "https://registry.npmjs.org/@originjs/vite-plugin-federation/-/vite-plugin-federation-1.4.1.tgz",
"integrity": "sha512-Uo08jW5pj1t58OUKuZNkmzcfTN2pqeVuAWCCiKf/75/oll4Efq4cHOqSE1FXMlvwZNGDziNdDyBbQ5IANem3CQ==",
"dev": true,
"license": "MulanPSL-2.0",
"dependencies": {
"estree-walker": "^3.0.2",
"magic-string": "^0.27.0"
},
"engines": {
"node": ">=14.0.0",
"pnpm": ">=7.0.1"
}
},
"node_modules/@originjs/vite-plugin-federation/node_modules/magic-string": {
"version": "0.27.0",
"resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.27.0.tgz",
"integrity": "sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==",
"dev": true,
"license": "MIT",
"dependencies": {
"@jridgewell/sourcemap-codec": "^1.4.13"
},
"engines": {
"node": ">=12"
}
},
"node_modules/@popperjs/core": { "node_modules/@popperjs/core": {
"version": "2.11.8", "version": "2.11.8",
"resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz", "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz",
@@ -2733,6 +2762,16 @@
"node": ">=4.0" "node": ">=4.0"
} }
}, },
"node_modules/estree-walker": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz",
"integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/estree": "^1.0.0"
}
},
"node_modules/esutils": { "node_modules/esutils": {
"version": "2.0.3", "version": "2.0.3",
"resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz",

View File

@@ -24,6 +24,7 @@
}, },
"devDependencies": { "devDependencies": {
"@eslint/js": "^9.30.1", "@eslint/js": "^9.30.1",
"@originjs/vite-plugin-federation": "^1.3.3",
"@types/react": "^19.1.8", "@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6", "@types/react-dom": "^19.1.6",
"@types/react-router-dom": "^5.3.3", "@types/react-router-dom": "^5.3.3",

View File

@@ -1,129 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Camera Route Test</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
background-color: #f5f5f5;
}
.test-links {
background: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
margin-bottom: 20px;
}
.test-links h2 {
color: #333;
margin-top: 0;
}
.test-links a {
display: inline-block;
margin: 10px 10px 10px 0;
padding: 10px 20px;
background-color: #007bff;
color: white;
text-decoration: none;
border-radius: 5px;
transition: background-color 0.3s;
}
.test-links a:hover {
background-color: #0056b3;
}
.info {
background: #e7f3ff;
border: 1px solid #b3d9ff;
border-radius: 5px;
padding: 15px;
margin-bottom: 20px;
}
.info h3 {
margin-top: 0;
color: #0066cc;
}
.note {
background: #fff3cd;
border: 1px solid #ffeaa7;
border-radius: 5px;
padding: 15px;
margin-bottom: 20px;
}
</style>
</head>
<body>
<h1>🎥 Camera Route Test</h1>
<div class="info">
<h3>Test the New Camera Routes</h3>
<p>This page helps you test the new camera live view routes that don't require authentication.</p>
<p><strong>Note:</strong> Make sure the React app is running and the camera API is accessible.</p>
</div>
<div class="test-links">
<h2>Test Camera Routes</h2>
<p>Click the links below to test different camera routes:</p>
<a href="/camera1/live" target="_blank">Camera 1 Live View</a>
<a href="/camera2/live" target="_blank">Camera 2 Live View</a>
<a href="/camera3/live" target="_blank">Camera 3 Live View</a>
<a href="/camera10/live" target="_blank">Camera 10 Live View</a>
</div>
<div class="note">
<h3>Expected Behavior</h3>
<ul>
<li>✅ <strong>Valid routes</strong> (like /camera1/live) should show the live camera feed</li>
<li>❌ <strong>Invalid routes</strong> (like /camera/live) should show an error message</li>
<li>🔒 <strong>Protected routes</strong> (like /) should redirect to login if not authenticated</li>
</ul>
</div>
<div class="info">
<h3>API Endpoints</h3>
<p>The camera routes use these backend API endpoints:</p>
<ul>
<li><code>POST /cameras/{camera_name}/start-stream</code> - Start streaming</li>
<li><code>GET /cameras/{camera_name}/stream</code> - Get MJPEG stream</li>
<li><code>POST /cameras/{camera_name}/stop-stream</code> - Stop streaming</li>
</ul>
</div>
<script>
// Add click tracking for analytics
document.querySelectorAll('.test-links a').forEach(link => {
link.addEventListener('click', function () {
console.log('Testing camera route:', this.href);
});
});
</script>
</body>
</html>

View File

@@ -7,7 +7,10 @@ import { ExperimentManagement } from './ExperimentManagement'
import { DataEntry } from './DataEntry' import { DataEntry } from './DataEntry'
import { VisionSystem } from './VisionSystem' import { VisionSystem } from './VisionSystem'
import { Scheduling } from './Scheduling' import { Scheduling } from './Scheduling'
import { VideoStreamingPage } from '../features/video-streaming' import React, { Suspense } from 'react'
import { loadRemoteComponent } from '../lib/loadRemote'
import { ErrorBoundary } from './ErrorBoundary'
import { isFeatureEnabled } from '../lib/featureFlags'
import { UserProfile } from './UserProfile' import { UserProfile } from './UserProfile'
import { userManagement, type User } from '../lib/supabase' import { userManagement, type User } from '../lib/supabase'
@@ -154,6 +157,13 @@ export function DashboardLayout({ onLogout, currentRoute }: DashboardLayoutProps
} }
} }
const LocalVideoPlaceholder = () => (<div className="p-6">Video module not enabled.</div>)
const RemoteVideoLibrary = loadRemoteComponent(
isFeatureEnabled('enableVideoModule'),
() => import('videoRemote/App'),
LocalVideoPlaceholder as any
) as unknown as React.ComponentType
const renderCurrentView = () => { const renderCurrentView = () => {
if (!user) return null if (!user) return null
@@ -194,7 +204,13 @@ export function DashboardLayout({ onLogout, currentRoute }: DashboardLayoutProps
case 'scheduling': case 'scheduling':
return <Scheduling user={user} currentRoute={currentRoute} /> return <Scheduling user={user} currentRoute={currentRoute} />
case 'video-library': case 'video-library':
return <VideoStreamingPage /> return (
<ErrorBoundary fallback={<div className="p-6">Failed to load video module. Please try again.</div>}>
<Suspense fallback={<div className="p-6">Loading video module...</div>}>
<RemoteVideoLibrary />
</Suspense>
</ErrorBoundary>
)
case 'profile': case 'profile':
return <UserProfile user={user} /> return <UserProfile user={user} />
default: default:

View File

@@ -0,0 +1,24 @@
import { Component, ReactNode } from 'react'
// Props for ErrorBoundary: the subtree to guard, plus an optional custom
// fallback element rendered after a descendant throws.
type Props = { children: ReactNode, fallback?: ReactNode }
// Internal state: set once a descendant has thrown during render.
type State = { hasError: boolean }
/**
 * Generic React error boundary.
 *
 * Renders the optional `fallback` prop (or a default message) when any
 * child component throws during render, a lifecycle method, or a
 * constructor. Used to guard lazily loaded remote modules so a failed
 * dynamic import degrades gracefully instead of crashing the app.
 */
export class ErrorBoundary extends Component<Props, State> {
  state: State = { hasError: false }

  // Switch to the fallback UI on the next render after a child throws.
  static getDerivedStateFromError() {
    return { hasError: true }
  }

  // Log the error and component stack so failures (e.g. a remote module
  // failing to load) are diagnosable; previously this was a silent no-op.
  componentDidCatch(error: Error, errorInfo: unknown) {
    console.error('ErrorBoundary caught an error:', error, errorInfo)
  }

  render() {
    if (this.state.hasError) {
      return this.props.fallback ?? <div className="p-6">Something went wrong loading this section.</div>
    }
    return this.props.children
  }
}

View File

@@ -1,200 +0,0 @@
/**
* VideoStreamingPage Component
*
* Main page component for the video streaming feature.
* Demonstrates how to compose the modular components together.
*/
import React, { useState, useMemo } from 'react';
import { VideoList, VideoModal, ApiStatusIndicator, VideoErrorBoundary, PerformanceDashboard } from './components';
import { type VideoFile, type VideoListFilters, type VideoListSortOptions } from './types';
export const VideoStreamingPage: React.FC = () => {
  // Video currently selected for playback and whether the modal is visible.
  const [selectedVideo, setSelectedVideo] = useState<VideoFile | null>(null);
  const [isModalOpen, setIsModalOpen] = useState(false);
  // Active list filters (camera / date range) and sort configuration.
  const [filters, setFilters] = useState<VideoListFilters>({});
  const [sortOptions, setSortOptions] = useState<VideoListSortOptions>({
    field: 'created_at',
    direction: 'desc',
  });

  // Available cameras for filtering (this could come from an API)
  const availableCameras = ['camera1', 'camera2', 'camera3']; // This should be fetched from your camera API

  // Open the playback modal for the clicked video.
  const handleVideoSelect = (video: VideoFile) => {
    setSelectedVideo(video);
    setIsModalOpen(true);
  };

  // Close the modal and drop the selection so the player unmounts.
  const handleModalClose = () => {
    setIsModalOpen(false);
    setSelectedVideo(null);
  };

  // 'all' clears the camera filter; any other value filters to that camera.
  const handleCameraFilterChange = (cameraName: string) => {
    setFilters(prev => ({
      ...prev,
      cameraName: cameraName === 'all' ? undefined : cameraName,
    }));
  };

  const handleSortChange = (field: VideoListSortOptions['field'], direction: VideoListSortOptions['direction']) => {
    setSortOptions({ field, direction });
  };

  // Both endpoints must be set for the date-range filter to apply;
  // otherwise the range is cleared.
  const handleDateRangeChange = (start: string, end: string) => {
    setFilters(prev => ({
      ...prev,
      dateRange: start && end ? { start, end } : undefined,
    }));
  };

  return (
    <VideoErrorBoundary>
      <div className="space-y-6">
        {/* Header */}
        <div className="flex items-start justify-between">
          <div>
            <h1 className="text-3xl font-bold text-gray-900">Video Library</h1>
            <p className="mt-2 text-gray-600">
              Browse and view recorded videos from your camera system
            </p>
          </div>
          <div className="flex-shrink-0">
            <ApiStatusIndicator showDetails={false} />
          </div>
        </div>

        {/* Filters and Controls */}
        <div className="bg-white rounded-xl border border-gray-200 p-6 shadow-theme-sm">
          <div className="grid grid-cols-1 lg:grid-cols-4 gap-6">
            {/* Camera Filter */}
            <div>
              <label className="block text-sm font-medium text-gray-700 mb-2">
                Camera
              </label>
              <div className="relative">
                <select
                  value={filters.cameraName || 'all'}
                  onChange={(e) => handleCameraFilterChange(e.target.value)}
                  className="w-full px-3 py-2.5 border border-gray-300 rounded-lg bg-white text-gray-900 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs transition-colors"
                >
                  <option value="all">All Cameras</option>
                  {availableCameras.map(camera => (
                    <option key={camera} value={camera}>
                      {camera}
                    </option>
                  ))}
                </select>
                <div className="absolute inset-y-0 right-0 flex items-center pr-3 pointer-events-none">
                  <svg className="w-4 h-4 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 9l-7 7-7-7" />
                  </svg>
                </div>
              </div>
            </div>

            {/* Sort Options */}
            <div>
              <label className="block text-sm font-medium text-gray-700 mb-2">
                Sort by
              </label>
              <div className="flex gap-2">
                <div className="relative flex-1">
                  <select
                    value={sortOptions.field}
                    onChange={(e) => handleSortChange(e.target.value as VideoListSortOptions['field'], sortOptions.direction)}
                    className="w-full px-3 py-2.5 border border-gray-300 rounded-lg bg-white text-gray-900 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs transition-colors"
                  >
                    <option value="created_at">Date Created</option>
                    <option value="file_size_bytes">File Size</option>
                    <option value="camera_name">Camera Name</option>
                    <option value="filename">Filename</option>
                  </select>
                  <div className="absolute inset-y-0 right-0 flex items-center pr-3 pointer-events-none">
                    <svg className="w-4 h-4 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M19 9l-7 7-7-7" />
                    </svg>
                  </div>
                </div>
                {/* Toggles ascending/descending for the current sort field. */}
                <button
                  onClick={() => handleSortChange(sortOptions.field, sortOptions.direction === 'asc' ? 'desc' : 'asc')}
                  className="px-3 py-2.5 border border-gray-300 rounded-lg bg-white hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs transition-colors"
                  title={`Sort ${sortOptions.direction === 'asc' ? 'Descending' : 'Ascending'}`}
                >
                  {sortOptions.direction === 'asc' ? (
                    <svg className="w-4 h-4 text-gray-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M3 4h13M3 8h9m-9 4h6m4 0l4-4m0 0l4 4m-4-4v12" />
                    </svg>
                  ) : (
                    <svg className="w-4 h-4 text-gray-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                      <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M3 4h13M3 8h9m-9 4h9m5-4v12m0 0l-4-4m4 4l4-4" />
                    </svg>
                  )}
                </button>
              </div>
            </div>

            {/* Date Range Filter */}
            <div>
              <label className="block text-sm font-medium text-gray-700 mb-2">
                Start Date
              </label>
              <input
                type="date"
                value={filters.dateRange?.start || ''}
                onChange={(e) => handleDateRangeChange(e.target.value, filters.dateRange?.end || '')}
                className="w-full px-3 py-2.5 border border-gray-300 rounded-lg bg-white text-gray-900 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs transition-colors"
              />
            </div>

            <div>
              <label className="block text-sm font-medium text-gray-700 mb-2">
                End Date
              </label>
              <input
                type="date"
                value={filters.dateRange?.end || ''}
                onChange={(e) => handleDateRangeChange(filters.dateRange?.start || '', e.target.value)}
                className="w-full px-3 py-2.5 border border-gray-300 rounded-lg bg-white text-gray-900 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs transition-colors"
              />
            </div>
          </div>

          {/* Clear Filters — only shown when at least one filter is active */}
          {(filters.cameraName || filters.dateRange) && (
            <div className="mt-6 pt-6 border-t border-gray-200">
              <button
                onClick={() => setFilters({})}
                className="inline-flex items-center px-4 py-2.5 text-sm font-medium transition rounded-lg border bg-white text-gray-700 border-gray-300 hover:bg-gray-50 focus:outline-none focus:ring-2 focus:ring-brand-500 focus:border-brand-500 shadow-theme-xs"
              >
                <svg className="w-4 h-4 mr-2" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                  <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
                </svg>
                Clear All Filters
              </button>
            </div>
          )}
        </div>

        {/* Video List */}
        <VideoList
          filters={filters}
          sortOptions={sortOptions}
          onVideoSelect={handleVideoSelect}
          limit={24}
        />

        {/* Video Modal */}
        <VideoModal
          video={selectedVideo}
          isOpen={isModalOpen}
          onClose={handleModalClose}
        />

        {/* Performance Dashboard (development only) */}
        <PerformanceDashboard />
      </div>
    </VideoErrorBoundary>
  );
};

View File

@@ -1,133 +0,0 @@
/**
* ApiStatusIndicator Component
*
* A component that displays the connection status of the video streaming API
* and provides helpful information when the API is not accessible.
*/
import React, { useState, useEffect } from 'react';
import { videoApiService } from '../services/videoApi';
interface ApiStatusIndicatorProps {
  className?: string;
  // When true, render the full status card; otherwise a compact dot + label.
  showDetails?: boolean;
}

export const ApiStatusIndicator: React.FC<ApiStatusIndicatorProps> = ({
  className = '',
  showDetails = false,
}) => {
  // null = not yet checked; true/false = last known API health.
  const [isOnline, setIsOnline] = useState<boolean | null>(null);
  const [isChecking, setIsChecking] = useState(false);
  const [lastChecked, setLastChecked] = useState<Date | null>(null);

  // Polls the health endpoint; any thrown error is treated as "offline".
  const checkApiStatus = async () => {
    setIsChecking(true);
    try {
      const status = await videoApiService.healthCheck();
      setIsOnline(status);
      setLastChecked(new Date());
    } catch (error) {
      setIsOnline(false);
      setLastChecked(new Date());
    } finally {
      setIsChecking(false);
    }
  };

  useEffect(() => {
    checkApiStatus();

    // Check status every 30 seconds
    const interval = setInterval(checkApiStatus, 30000);
    return () => clearInterval(interval);
  }, []);

  const getStatusColor = () => {
    if (isChecking) return 'bg-yellow-500';
    if (isOnline === null) return 'bg-gray-500';
    return isOnline ? 'bg-green-500' : 'bg-red-500';
  };

  const getStatusText = () => {
    if (isChecking) return 'Checking...';
    if (isOnline === null) return 'Unknown';
    return isOnline ? 'Connected' : 'Disconnected';
  };

  // Spinner while checking, check-mark when online, X when offline.
  const getStatusIcon = () => {
    if (isChecking) {
      return (
        <div className="animate-spin rounded-full h-3 w-3 border-b-2 border-white"></div>
      );
    }
    if (isOnline) {
      return (
        <svg className="w-3 h-3" fill="currentColor" viewBox="0 0 20 20">
          <path fillRule="evenodd" d="M16.707 5.293a1 1 0 010 1.414l-8 8a1 1 0 01-1.414 0l-4-4a1 1 0 011.414-1.414L8 12.586l7.293-7.293a1 1 0 011.414 0z" clipRule="evenodd" />
        </svg>
      );
    }
    return (
      <svg className="w-3 h-3" fill="currentColor" viewBox="0 0 20 20">
        <path fillRule="evenodd" d="M4.293 4.293a1 1 0 011.414 0L10 8.586l4.293-4.293a1 1 0 111.414 1.414L11.414 10l4.293 4.293a1 1 0 01-1.414 1.414L10 11.414l-4.293 4.293a1 1 0 01-1.414-1.414L8.586 10 4.293 5.707a1 1 0 010-1.414z" clipRule="evenodd" />
      </svg>
    );
  };

  // Compact variant: colored dot plus a short status label.
  if (!showDetails) {
    return (
      <div className={`inline-flex items-center ${className}`}>
        <div className={`w-2 h-2 rounded-full ${getStatusColor()} mr-2`}></div>
        <span className="text-sm text-gray-600">{getStatusText()}</span>
      </div>
    );
  }

  // Detailed card: status, manual refresh, last-checked time, and
  // troubleshooting hints when the API is unreachable.
  return (
    <div className={`bg-white border border-gray-200 rounded-lg p-4 ${className}`}>
      <div className="flex items-center justify-between mb-2">
        <h3 className="text-sm font-medium text-gray-900">Video API Status</h3>
        <button
          onClick={checkApiStatus}
          disabled={isChecking}
          className="text-xs text-blue-600 hover:text-blue-800 disabled:opacity-50"
        >
          Refresh
        </button>
      </div>

      <div className="flex items-center mb-2">
        <div className={`w-3 h-3 rounded-full ${getStatusColor()} mr-2 flex items-center justify-center text-white`}>
          {getStatusIcon()}
        </div>
        <span className="text-sm font-medium">{getStatusText()}</span>
      </div>

      {lastChecked && (
        <div className="text-xs text-gray-500">
          Last checked: {lastChecked.toLocaleTimeString()}
        </div>
      )}

      {isOnline === false && (
        <div className="mt-3 p-3 bg-red-50 border border-red-200 rounded-md">
          <div className="text-sm text-red-800">
            <strong>Connection Failed</strong>
            <p className="mt-1">
              Cannot connect to the USDA Vision Camera System. Please ensure:
            </p>
            <ul className="mt-2 list-disc list-inside space-y-1">
              <li>The vision system is running</li>
              <li>The API is accessible at the configured URL</li>
              <li>Network connectivity is available</li>
            </ul>
          </div>
        </div>
      )}
    </div>
  );
};

View File

@@ -1,159 +0,0 @@
/**
* Pagination Component
*
* A reusable pagination component that matches the dashboard template's styling patterns.
* Provides page navigation with first/last, previous/next, and numbered page buttons.
*/
import { type PaginationProps } from '../types';
export const Pagination: React.FC<PaginationProps> = ({
  currentPage,
  totalPages,
  onPageChange,
  showFirstLast = true,
  showPrevNext = true,
  maxVisiblePages = 5,
  className = '',
}) => {
  // Don't render if there's only one page or no pages
  if (totalPages <= 1) {
    return null;
  }

  // Calculate visible page numbers: a window of up to maxVisiblePages
  // centered on currentPage, clamped to [1, totalPages].
  const getVisiblePages = (): number[] => {
    const pages: number[] = [];
    const halfVisible = Math.floor(maxVisiblePages / 2);

    let startPage = Math.max(1, currentPage - halfVisible);
    let endPage = Math.min(totalPages, currentPage + halfVisible);

    // Adjust if we're near the beginning or end so the window stays full
    // whenever enough pages exist.
    if (endPage - startPage + 1 < maxVisiblePages) {
      if (startPage === 1) {
        endPage = Math.min(totalPages, startPage + maxVisiblePages - 1);
      } else {
        startPage = Math.max(1, endPage - maxVisiblePages + 1);
      }
    }

    for (let i = startPage; i <= endPage; i++) {
      pages.push(i);
    }

    return pages;
  };

  const visiblePages = getVisiblePages();
  const isFirstPage = currentPage === 1;
  const isLastPage = currentPage === totalPages;

  // Button base classes matching dashboard template
  const baseButtonClasses = "inline-flex items-center justify-center px-3 py-2 text-sm font-medium transition-all duration-200 rounded-lg border";

  // Active page button classes
  const activeButtonClasses = "bg-brand-500 text-white border-brand-500 hover:bg-brand-600 shadow-theme-sm";

  // Inactive page button classes
  const inactiveButtonClasses = "bg-white text-gray-700 border-gray-300 hover:bg-gray-50 hover:border-gray-400 shadow-theme-xs";

  // Disabled button classes
  const disabledButtonClasses = "bg-gray-100 text-gray-400 border-gray-200 cursor-not-allowed opacity-50";

  // Ignore clicks on the current page and out-of-range targets.
  const handlePageClick = (page: number) => {
    if (page !== currentPage && page >= 1 && page <= totalPages) {
      onPageChange(page);
    }
  };

  return (
    <div className={`flex items-center justify-center space-x-1 ${className}`}>
      {/* First Page Button */}
      {showFirstLast && !isFirstPage && (
        <button
          onClick={() => handlePageClick(1)}
          className={`${baseButtonClasses} ${inactiveButtonClasses}`}
          aria-label="Go to first page"
        >
          <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M11 19l-7-7 7-7m8 14l-7-7 7-7" />
          </svg>
        </button>
      )}

      {/* Previous Page Button */}
      {showPrevNext && (
        <button
          onClick={() => handlePageClick(currentPage - 1)}
          disabled={isFirstPage}
          className={`${baseButtonClasses} ${isFirstPage ? disabledButtonClasses : inactiveButtonClasses}`}
          aria-label="Go to previous page"
        >
          <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 19l-7-7 7-7" />
          </svg>
        </button>
      )}

      {/* Page Number Buttons */}
      {visiblePages.map((page) => (
        <button
          key={page}
          onClick={() => handlePageClick(page)}
          className={`${baseButtonClasses} ${page === currentPage ? activeButtonClasses : inactiveButtonClasses
            } min-w-[40px]`}
          aria-label={`Go to page ${page}`}
          aria-current={page === currentPage ? 'page' : undefined}
        >
          {page}
        </button>
      ))}

      {/* Next Page Button */}
      {showPrevNext && (
        <button
          onClick={() => handlePageClick(currentPage + 1)}
          disabled={isLastPage}
          className={`${baseButtonClasses} ${isLastPage ? disabledButtonClasses : inactiveButtonClasses}`}
          aria-label="Go to next page"
        >
          <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M9 5l7 7-7 7" />
          </svg>
        </button>
      )}

      {/* Last Page Button */}
      {showFirstLast && !isLastPage && (
        <button
          onClick={() => handlePageClick(totalPages)}
          className={`${baseButtonClasses} ${inactiveButtonClasses}`}
          aria-label="Go to last page"
        >
          <svg className="w-4 h-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M13 5l7 7-7 7M5 5l7 7-7 7" />
          </svg>
        </button>
      )}
    </div>
  );
};
// Page info component to show current page and total
// Renders the "Showing X to Y of Z results (Page A of B)" summary line
// that accompanies the Pagination control.
export const PageInfo: React.FC<{
  currentPage: number;
  totalPages: number;
  totalItems: number;
  itemsPerPage: number;
  className?: string;
}> = ({ currentPage, totalPages, totalItems, itemsPerPage, className = '' }) => {
  // First and last item indices (1-based) shown on the current page;
  // the last page may be partially filled, hence the min().
  const firstVisible = itemsPerPage * (currentPage - 1) + 1;
  const lastVisible = Math.min(itemsPerPage * currentPage, totalItems);

  return (
    <div className={`text-sm text-gray-600 ${className}`}>
      Showing {firstVisible} to {lastVisible} of {totalItems} results (Page {currentPage} of {totalPages})
    </div>
  );
};

View File

@@ -1,167 +0,0 @@
/**
* PerformanceDashboard Component
*
* A development tool for monitoring video streaming performance.
* Only shown in development mode.
*/
import React, { useState, useEffect } from 'react';
import { performanceMonitor, thumbnailCache } from '../utils';
interface PerformanceDashboardProps {
  className?: string;
}

export const PerformanceDashboard: React.FC<PerformanceDashboardProps> = ({
  className = '',
}) => {
  const [isOpen, setIsOpen] = useState(false);
  // NOTE(review): typed `any` because performanceMonitor's/thumbnailCache's
  // stat shapes are not declared here — presumably objects with
  // totalOperations/successRate/averageDuration etc.; confirm against
  // ../utils and tighten these types.
  const [stats, setStats] = useState<any>(null);
  const [cacheStats, setCacheStats] = useState<any>(null);

  useEffect(() => {
    if (isOpen) {
      // Refresh the displayed metrics every 2s while the panel is open.
      const updateStats = () => {
        setStats({
          overall: performanceMonitor.getStats(),
          getVideos: performanceMonitor.getStats('get_videos'),
          getThumbnail: performanceMonitor.getStats('get_thumbnail'),
          recentMetrics: performanceMonitor.getRecentMetrics(5),
        });
        setCacheStats(thumbnailCache.getStats());
      };

      updateStats();
      const interval = setInterval(updateStats, 2000);
      return () => clearInterval(interval);
    }
  }, [isOpen]);

  // Only show in development
  if (process.env.NODE_ENV !== 'development') {
    return null;
  }

  // Collapsed state: floating action button that opens the panel.
  if (!isOpen) {
    return (
      <button
        onClick={() => setIsOpen(true)}
        className={`fixed bottom-4 right-4 bg-blue-600 text-white p-2 rounded-full shadow-lg hover:bg-blue-700 transition-colors z-50 ${className}`}
        title="Open Performance Dashboard"
      >
        <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
          <path fillRule="evenodd" d="M3 3a1 1 0 000 2v8a2 2 0 002 2h2.586l-1.293 1.293a1 1 0 101.414 1.414L10 15.414l2.293 2.293a1 1 0 001.414-1.414L12.414 15H15a2 2 0 002-2V5a1 1 0 100-2H3zm11.707 4.707a1 1 0 00-1.414-1.414L10 9.586 8.707 8.293a1 1 0 00-1.414 0l-2 2a1 1 0 101.414 1.414L8 10.414l1.293 1.293a1 1 0 001.414 0l4-4z" clipRule="evenodd" />
        </svg>
      </button>
    );
  }

  // Expanded panel: overall / per-operation / cache stats plus recent ops.
  return (
    <div className={`fixed bottom-4 right-4 bg-white border border-gray-200 rounded-lg shadow-xl p-4 max-w-md w-80 max-h-96 overflow-y-auto z-50 ${className}`}>
      <div className="flex items-center justify-between mb-4">
        <h3 className="text-lg font-semibold text-gray-900">Performance</h3>
        <button
          onClick={() => setIsOpen(false)}
          className="text-gray-400 hover:text-gray-600"
        >
          <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
            <path fillRule="evenodd" d="M4.293 4.293a1 1 0 011.414 0L10 8.586l4.293-4.293a1 1 0 111.414 1.414L11.414 10l4.293 4.293a1 1 0 01-1.414 1.414L10 11.414l-4.293 4.293a1 1 0 01-1.414-1.414L8.586 10 4.293 5.707a1 1 0 010-1.414z" clipRule="evenodd" />
          </svg>
        </button>
      </div>

      {stats && (
        <div className="space-y-4">
          {/* Overall Stats */}
          <div>
            <h4 className="text-sm font-medium text-gray-700 mb-2">Overall</h4>
            <div className="grid grid-cols-2 gap-2 text-xs">
              <div>Operations: {stats.overall.totalOperations}</div>
              <div>Success: {(stats.overall.successRate * 100).toFixed(1)}%</div>
              <div>Avg: {stats.overall.averageDuration.toFixed(0)}ms</div>
              <div>Max: {stats.overall.maxDuration.toFixed(0)}ms</div>
            </div>
          </div>

          {/* Video Loading Stats — hidden until at least one call recorded */}
          {stats.getVideos.totalOperations > 0 && (
            <div>
              <h4 className="text-sm font-medium text-gray-700 mb-2">Video Loading</h4>
              <div className="grid grid-cols-2 gap-2 text-xs">
                <div>Calls: {stats.getVideos.totalOperations}</div>
                <div>Success: {(stats.getVideos.successRate * 100).toFixed(1)}%</div>
                <div>Avg: {stats.getVideos.averageDuration.toFixed(0)}ms</div>
                <div>Max: {stats.getVideos.maxDuration.toFixed(0)}ms</div>
              </div>
            </div>
          )}

          {/* Thumbnail Stats */}
          {stats.getThumbnail.totalOperations > 0 && (
            <div>
              <h4 className="text-sm font-medium text-gray-700 mb-2">Thumbnails</h4>
              <div className="grid grid-cols-2 gap-2 text-xs">
                <div>Calls: {stats.getThumbnail.totalOperations}</div>
                <div>Success: {(stats.getThumbnail.successRate * 100).toFixed(1)}%</div>
                <div>Avg: {stats.getThumbnail.averageDuration.toFixed(0)}ms</div>
                <div>Max: {stats.getThumbnail.maxDuration.toFixed(0)}ms</div>
              </div>
            </div>
          )}

          {/* Cache Stats */}
          {cacheStats && (
            <div>
              <h4 className="text-sm font-medium text-gray-700 mb-2">Cache</h4>
              <div className="grid grid-cols-2 gap-2 text-xs">
                <div>Cached: {cacheStats.size}</div>
                <div>Memory: {(cacheStats.totalMemory / 1024 / 1024).toFixed(1)}MB</div>
                <div>Hits: {cacheStats.totalAccess}</div>
                <div>Avg Size: {(cacheStats.averageSize / 1024).toFixed(0)}KB</div>
              </div>
            </div>
          )}

          {/* Recent Operations */}
          {stats.recentMetrics.length > 0 && (
            <div>
              <h4 className="text-sm font-medium text-gray-700 mb-2">Recent</h4>
              <div className="space-y-1">
                {stats.recentMetrics.map((metric: any, index: number) => (
                  <div key={index} className="flex justify-between text-xs">
                    <span className={metric.success ? 'text-green-600' : 'text-red-600'}>
                      {metric.operation}
                    </span>
                    <span>{metric.duration?.toFixed(0)}ms</span>
                  </div>
                ))}
              </div>
            </div>
          )}

          {/* Actions: reset collected metrics, or dump a report to console */}
          <div className="flex space-x-2">
            <button
              onClick={() => {
                performanceMonitor.clear();
                thumbnailCache.clear();
              }}
              className="flex-1 px-2 py-1 text-xs bg-red-100 text-red-700 rounded hover:bg-red-200"
            >
              Clear All
            </button>
            <button
              onClick={() => {
                console.log(performanceMonitor.getReport());
                console.log('Cache Stats:', thumbnailCache.getStats());
              }}
              className="flex-1 px-2 py-1 text-xs bg-blue-100 text-blue-700 rounded hover:bg-blue-200"
            >
              Log Report
            </button>
          </div>
        </div>
      )}
    </div>
  );
};

View File

@@ -1,170 +0,0 @@
/**
* VideoCard Component
*
* A reusable card component for displaying video information with thumbnail, metadata, and actions.
*/
import { type VideoCardProps } from '../types';
import { VideoThumbnail } from './VideoThumbnail';
import {
formatFileSize,
formatVideoDate,
getRelativeTime,
getFormatDisplayName,
getStatusBadgeClass,
getResolutionString,
} from '../utils/videoUtils';
export const VideoCard: React.FC<VideoCardProps> = ({
  video,
  onClick,
  showMetadata = true,
  className = '',
}) => {
  const handleClick = () => {
    if (onClick) {
      onClick(video);
    }
  };

  // Thumbnail clicks delegate to the same select handler as the card body.
  const handleThumbnailClick = () => {
    handleClick();
  };

  // The card is only interactive (pointer cursor, hover border) when an
  // onClick handler is provided.
  const cardClasses = [
    'bg-white rounded-xl border border-gray-200 overflow-hidden transition-all hover:shadow-theme-md',
    onClick ? 'cursor-pointer hover:border-gray-300' : '',
    className,
  ].filter(Boolean).join(' ');

  return (
    <div className={cardClasses} onClick={onClick ? handleClick : undefined}>
      {/* Thumbnail */}
      <div className="relative">
        <VideoThumbnail
          fileId={video.file_id}
          width={320}
          height={180}
          alt={`Thumbnail for ${video.filename}`}
          onClick={onClick ? handleThumbnailClick : undefined}
          className="w-full"
        />

        {/* Status Badge */}
        <div className="absolute top-2 left-2">
          <span className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${getStatusBadgeClass(video.status)}`}>
            {video.status}
          </span>
        </div>

        {/* Format Badge */}
        <div className="absolute top-2 right-2">
          <span className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-gray-100 text-gray-800">
            {getFormatDisplayName(video.format)}
          </span>
        </div>

        {/* Streamable Indicator — green when playable now, yellow otherwise */}
        {video.is_streamable ? (
          <div className="absolute bottom-2 left-2">
            <div className="bg-green-500 text-white text-xs px-2 py-1 rounded flex items-center">
              <svg className="w-3 h-3 mr-1" fill="currentColor" viewBox="0 0 20 20">
                <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM9.555 7.168A1 1 0 008 8v4a1 1 0 001.555.832l3-2a1 1 0 000-1.664l-3-2z" clipRule="evenodd" />
              </svg>
              Streamable
            </div>
          </div>
        ) : (
          <div className="absolute bottom-2 left-2">
            <div className="bg-yellow-500 text-white text-xs px-2 py-1 rounded flex items-center">
              <svg className="w-3 h-3 mr-1" fill="currentColor" viewBox="0 0 20 20">
                <path fillRule="evenodd" d="M8.257 3.099c.765-1.36 2.722-1.36 3.486 0l5.58 9.92c.75 1.334-.213 2.98-1.742 2.98H4.42c-1.53 0-2.493-1.646-1.743-2.98l5.58-9.92zM11 13a1 1 0 11-2 0 1 1 0 012 0zm-1-8a1 1 0 00-1 1v3a1 1 0 002 0V6a1 1 0 00-1-1z" clipRule="evenodd" />
              </svg>
              Processing
            </div>
          </div>
        )}

        {/* Conversion Needed Indicator */}
        {video.needs_conversion && (
          <div className="absolute bottom-2 right-2">
            <div className="bg-yellow-500 text-white text-xs px-2 py-1 rounded flex items-center">
              <svg className="w-3 h-3 mr-1" fill="currentColor" viewBox="0 0 20 20">
                <path fillRule="evenodd" d="M8.257 3.099c.765-1.36 2.722-1.36 3.486 0l5.58 9.92c.75 1.334-.213 2.98-1.742 2.98H4.42c-1.53 0-2.493-1.646-1.743-2.98l5.58-9.92zM11 13a1 1 0 11-2 0 1 1 0 012 0zm-1-8a1 1 0 00-1 1v3a1 1 0 002 0V6a1 1 0 00-1-1z" clipRule="evenodd" />
              </svg>
              Needs Conversion
            </div>
          </div>
        )}
      </div>

      {/* Content */}
      <div className="p-4">
        {/* Title */}
        <h3 className="text-lg font-semibold text-gray-900 mb-2 truncate" title={video.filename}>
          {video.filename}
        </h3>

        {/* Camera Name */}
        <div className="flex items-center text-sm text-gray-600 mb-2">
          <svg className="w-4 h-4 mr-1" fill="currentColor" viewBox="0 0 20 20">
            <path fillRule="evenodd" d="M4 3a2 2 0 00-2 2v10a2 2 0 002 2h12a2 2 0 002-2V5a2 2 0 00-2-2H4zm12 12H4l4-8 3 6 2-4 3 6z" clipRule="evenodd" />
          </svg>
          {video.camera_name}
        </div>

        {/* Basic Info */}
        <div className="grid grid-cols-2 gap-4 text-sm text-gray-600 mb-3">
          <div>
            <span className="font-medium">Size:</span> {formatFileSize(video.file_size_bytes)}
          </div>
          <div>
            <span className="font-medium">Created:</span> {getRelativeTime(video.created_at)}
          </div>
        </div>

        {/* Metadata (if available and requested) — the 'in' check guards
            against list-endpoint items that omit the metadata field */}
        {showMetadata && 'metadata' in video && video.metadata && (
          <div className="border-t pt-3 mt-3 border-gray-100">
            <div className="grid grid-cols-2 gap-4 text-sm text-gray-600">
              <div>
                <span className="font-medium">Duration:</span> {Math.round(video.metadata.duration_seconds)}s
              </div>
              <div>
                <span className="font-medium">Resolution:</span> {getResolutionString(video.metadata.width, video.metadata.height)}
              </div>
              <div>
                <span className="font-medium">FPS:</span> {video.metadata.fps}
              </div>
              <div>
                <span className="font-medium">Codec:</span> {video.metadata.codec}
              </div>
            </div>
          </div>
        )}

        {/* Actions — stopPropagation keeps the card's own onClick from double-firing */}
        <div className="flex justify-between items-center mt-4 pt-3 border-t border-gray-100">
          <div className="text-xs text-gray-500">
            {formatVideoDate(video.created_at)}
          </div>
          {onClick && (
            <button
              onClick={(e) => {
                e.stopPropagation();
                handleClick();
              }}
              className="inline-flex items-center px-3 py-1.5 text-xs font-medium transition rounded-lg border border-transparent bg-brand-500 text-white hover:bg-brand-600 shadow-theme-xs"
            >
              <svg className="w-3 h-3 mr-1" fill="currentColor" viewBox="0 0 20 20">
                <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM9.555 7.168A1 1 0 008 8v4a1 1 0 001.555.832l3-2a1 1 0 000-1.664l-3-2z" clipRule="evenodd" />
              </svg>
              Play
            </button>
          )}
        </div>
      </div>
    </div>
  );
};

View File

@@ -1,196 +0,0 @@
/**
* VideoDebugger Component
*
* A development tool for debugging video streaming issues.
* Provides direct access to test video URLs and diagnose problems.
*/
import React, { useState } from 'react';
import { videoApiService } from '../services/videoApi';
interface VideoDebuggerProps {
  // ID of the video file under diagnosis.
  fileId: string;
  className?: string;
}

export const VideoDebugger: React.FC<VideoDebuggerProps> = ({
  fileId,
  className = '',
}) => {
  const [isOpen, setIsOpen] = useState(false);
  // Raw diagnostic result object, rendered as pretty-printed JSON below.
  const [testResults, setTestResults] = useState<any>(null);
  const [isLoading, setIsLoading] = useState(false);

  const streamingUrl = videoApiService.getStreamingUrl(fileId);
  const thumbnailUrl = videoApiService.getThumbnailUrl(fileId);

  // Runs five independent probes (video info, streaming info, HEAD on the
  // stream, a byte-range request, HEAD on the thumbnail); each failure is
  // captured per-test so the others still report.
  const runDiagnostics = async () => {
    setIsLoading(true);
    const results: any = {
      timestamp: new Date().toISOString(),
      fileId,
      streamingUrl,
      thumbnailUrl,
      tests: {}
    };

    try {
      // Test 1: Video Info
      try {
        const videoInfo = await videoApiService.getVideoInfo(fileId);
        results.tests.videoInfo = { success: true, data: videoInfo };
      } catch (error) {
        results.tests.videoInfo = { success: false, error: error instanceof Error ? error.message : 'Unknown error' };
      }

      // Test 2: Streaming Info
      try {
        const streamingInfo = await videoApiService.getStreamingInfo(fileId);
        results.tests.streamingInfo = { success: true, data: streamingInfo };
      } catch (error) {
        results.tests.streamingInfo = { success: false, error: error instanceof Error ? error.message : 'Unknown error' };
      }

      // Test 3: HEAD request to streaming URL
      try {
        const response = await fetch(streamingUrl, { method: 'HEAD' });
        results.tests.streamingHead = {
          success: response.ok,
          status: response.status,
          headers: Object.fromEntries(response.headers.entries())
        };
      } catch (error) {
        results.tests.streamingHead = { success: false, error: error instanceof Error ? error.message : 'Unknown error' };
      }

      // Test 4: Range request test — verifies the server honors HTTP byte
      // ranges, which the <video> element needs for seeking.
      try {
        const response = await fetch(streamingUrl, {
          headers: { 'Range': 'bytes=0-1023' }
        });
        results.tests.rangeRequest = {
          success: response.ok,
          status: response.status,
          supportsRanges: response.headers.get('accept-ranges') === 'bytes',
          contentRange: response.headers.get('content-range')
        };
      } catch (error) {
        results.tests.rangeRequest = { success: false, error: error instanceof Error ? error.message : 'Unknown error' };
      }

      // Test 5: Thumbnail test
      try {
        const response = await fetch(thumbnailUrl, { method: 'HEAD' });
        results.tests.thumbnail = {
          success: response.ok,
          status: response.status,
          contentType: response.headers.get('content-type')
        };
      } catch (error) {
        results.tests.thumbnail = { success: false, error: error instanceof Error ? error.message : 'Unknown error' };
      }
    } catch (error) {
      results.error = error instanceof Error ? error.message : 'Unknown error';
    }

    setTestResults(results);
    setIsLoading(false);
  };

  // Only show in development
  if (process.env.NODE_ENV !== 'development') {
    return null;
  }

  if (!isOpen) {
    return (
      <button
        onClick={() => setIsOpen(true)}
        className={`px-3 py-1 text-xs bg-yellow-100 text-yellow-800 rounded hover:bg-yellow-200 transition-colors ${className}`}
        title="Open Video Debugger"
      >
        🔧 Debug
      </button>
    );
  }

  return (
    <div className={`bg-white border border-gray-200 rounded-lg shadow-lg p-4 max-w-2xl ${className}`}>
      <div className="flex items-center justify-between mb-4">
        <h3 className="text-lg font-semibold text-gray-900">Video Debugger</h3>
        {/* NOTE(review): this close button renders no visible label — the
            glyph (likely a ✕) appears to have been lost; confirm and restore. */}
        <button
          onClick={() => setIsOpen(false)}
          className="text-gray-400 hover:text-gray-600"
        >
        </button>
      </div>

      <div className="space-y-4">
        {/* Basic Info */}
        <div>
          <h4 className="font-medium text-gray-700 mb-2">Basic Info</h4>
          <div className="text-sm space-y-1">
            <div><strong>File ID:</strong> {fileId}</div>
            <div><strong>Streaming URL:</strong> <a href={streamingUrl} target="_blank" rel="noopener noreferrer" className="text-blue-600 hover:underline">{streamingUrl}</a></div>
            <div><strong>Thumbnail URL:</strong> <a href={thumbnailUrl} target="_blank" rel="noopener noreferrer" className="text-blue-600 hover:underline">{thumbnailUrl}</a></div>
          </div>
        </div>

        {/* Quick Actions */}
        <div>
          <h4 className="font-medium text-gray-700 mb-2">Quick Actions</h4>
          <div className="flex space-x-2">
            <button
              onClick={runDiagnostics}
              disabled={isLoading}
              className="px-3 py-1 text-sm bg-blue-600 text-white rounded hover:bg-blue-700 disabled:opacity-50"
            >
              {isLoading ? 'Running...' : 'Run Diagnostics'}
            </button>
            <button
              onClick={() => window.open(streamingUrl, '_blank')}
              className="px-3 py-1 text-sm bg-green-600 text-white rounded hover:bg-green-700"
            >
              Open Video
            </button>
            <button
              onClick={() => window.open(thumbnailUrl, '_blank')}
              className="px-3 py-1 text-sm bg-purple-600 text-white rounded hover:bg-purple-700"
            >
              Open Thumbnail
            </button>
          </div>
        </div>

        {/* Test Results */}
        {testResults && (
          <div>
            <h4 className="font-medium text-gray-700 mb-2">Diagnostic Results</h4>
            <div className="bg-gray-50 rounded p-3 text-xs font-mono max-h-64 overflow-y-auto">
              <pre>{JSON.stringify(testResults, null, 2)}</pre>
            </div>
          </div>
        )}

        {/* Native Video Test — bypasses the custom player to isolate issues */}
        <div>
          <h4 className="font-medium text-gray-700 mb-2">Native Video Test</h4>
          <video
            controls
            width="100%"
            height="200"
            className="border rounded"
            onLoadStart={() => console.log('Native video load started')}
            onLoadedData={() => console.log('Native video data loaded')}
            onError={(e) => console.error('Native video error:', e)}
          >
            <source src={streamingUrl} type="video/mp4" />
            Your browser does not support the video tag.
          </video>
        </div>
      </div>
    </div>
  );
};

View File

@@ -1,146 +0,0 @@
/**
* VideoErrorBoundary Component
*
* Error boundary specifically designed for video streaming components.
* Provides user-friendly error messages and recovery options.
*/
import React, { Component, ReactNode } from 'react';
interface Props {
  children: ReactNode;
  // Optional custom UI to render instead of the default error screen.
  fallback?: ReactNode;
  // Optional hook for reporting errors to external tooling.
  onError?: (error: Error, errorInfo: React.ErrorInfo) => void;
}

interface State {
  hasError: boolean;
  error: Error | null;
  errorInfo: React.ErrorInfo | null;
}

export class VideoErrorBoundary extends Component<Props, State> {
  constructor(props: Props) {
    super(props);
    this.state = {
      hasError: false,
      error: null,
      errorInfo: null,
    };
  }

  static getDerivedStateFromError(error: Error): State {
    return {
      hasError: true,
      error,
      errorInfo: null,
    };
  }

  componentDidCatch(error: Error, errorInfo: React.ErrorInfo) {
    // errorInfo (component stack) is only available here, not in
    // getDerivedStateFromError, so it is stored in a second pass.
    this.setState({
      error,
      errorInfo,
    });

    // Call the onError callback if provided
    if (this.props.onError) {
      this.props.onError(error, errorInfo);
    }

    // Log error for debugging
    console.error('Video streaming error:', error, errorInfo);
  }

  // Resets the boundary so the children re-render and can retry.
  handleRetry = () => {
    this.setState({
      hasError: false,
      error: null,
      errorInfo: null,
    });
  };

  render() {
    if (this.state.hasError) {
      // Use custom fallback if provided
      if (this.props.fallback) {
        return this.props.fallback;
      }

      // Default error UI
      return (
        <div className="min-h-[400px] flex items-center justify-center bg-gray-50 rounded-lg border border-gray-200">
          <div className="text-center max-w-md mx-auto p-6">
            <div className="w-16 h-16 mx-auto mb-4 bg-red-100 rounded-full flex items-center justify-center">
              <svg className="w-8 h-8 text-red-600" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
              </svg>
            </div>

            <h3 className="text-lg font-semibold text-gray-900 mb-2">
              Video System Error
            </h3>

            <p className="text-gray-600 mb-4">
              Something went wrong with the video streaming component. This might be due to:
            </p>

            <ul className="text-sm text-gray-500 text-left mb-6 space-y-1">
              <li> Network connectivity issues</li>
              <li> Video API server problems</li>
              <li> Corrupted video files</li>
              <li> Browser compatibility issues</li>
            </ul>

            <div className="space-y-3">
              <button
                onClick={this.handleRetry}
                className="w-full px-4 py-2 bg-blue-600 text-white rounded-lg hover:bg-blue-700 transition-colors"
              >
                Try Again
              </button>

              <button
                onClick={() => window.location.reload()}
                className="w-full px-4 py-2 bg-gray-200 text-gray-800 rounded-lg hover:bg-gray-300 transition-colors"
              >
                Reload Page
              </button>
            </div>

            {/* Error details for debugging (only in development) */}
            {process.env.NODE_ENV === 'development' && this.state.error && (
              <details className="mt-6 text-left">
                <summary className="cursor-pointer text-sm text-gray-500 hover:text-gray-700">
                  Show Error Details
                </summary>
                <div className="mt-2 p-3 bg-gray-100 rounded text-xs font-mono text-gray-700 overflow-auto max-h-32">
                  <div className="font-semibold mb-1">Error:</div>
                  <div className="mb-2">{this.state.error.message}</div>
                  <div className="font-semibold mb-1">Stack:</div>
                  <div>{this.state.error.stack}</div>
                </div>
              </details>
            )}
          </div>
        </div>
      );
    }

    return this.props.children;
  }
}
/**
 * Higher-order component that wraps `Wrapped` in a {@link VideoErrorBoundary}.
 *
 * @param Wrapped - Component to protect.
 * @param fallback - Optional custom UI shown when an error is caught.
 * @returns A component with the same props as `Wrapped`.
 */
export function withVideoErrorBoundary<P extends object>(
  Wrapped: React.ComponentType<P>,
  fallback?: ReactNode
) {
  function WrappedComponent(props: P) {
    return (
      <VideoErrorBoundary fallback={fallback}>
        <Wrapped {...props} />
      </VideoErrorBoundary>
    );
  }
  return WrappedComponent;
}

View File

@@ -1,231 +0,0 @@
/**
* VideoList Component
*
* A reusable component for displaying a list/grid of videos with filtering, sorting, and pagination.
*/
import React, { useState, useEffect } from 'react';
import { type VideoListProps, type VideoListFilters, type VideoListSortOptions } from '../types';
import { useVideoList } from '../hooks/useVideoList';
import { VideoCard } from './VideoCard';
import { Pagination, PageInfo } from './Pagination';
/**
 * Paginated grid of videos with loading, error, and empty states.
 *
 * Filter/sort props are mirrored into local state; data fetching and paging
 * are delegated entirely to the `useVideoList` hook.
 */
export const VideoList: React.FC<VideoListProps> = ({
  filters,
  sortOptions,
  limit = 20,
  onVideoSelect,
  className = '',
}) => {
  // Local copies of the filter/sort props, kept in sync via the effects below.
  const [localFilters, setLocalFilters] = useState<VideoListFilters>(filters || {});
  const [localSort, setLocalSort] = useState<VideoListSortOptions>(
    sortOptions || { field: 'created_at', direction: 'desc' }
  );
  // NOTE(review): localSort is tracked but never passed into useVideoList, and
  // several destructured values below (loadMore, hasMore, nextPage, previousPage,
  // updateFilters, updateSort) are unused in this component — confirm whether
  // sorting / incremental loading was meant to be wired up here.
  const {
    videos,
    totalCount,
    currentPage,
    totalPages,
    loading,
    error,
    refetch,
    loadMore,
    hasMore,
    goToPage,
    nextPage,
    previousPage,
    updateFilters,
    updateSort,
  } = useVideoList({
    initialParams: {
      camera_name: localFilters.cameraName,
      start_date: localFilters.dateRange?.start,
      end_date: localFilters.dateRange?.end,
      limit,
      include_metadata: true,
      page: 1, // Start with page 1
    },
    autoFetch: true,
  });
  // Update filters when props change (without causing infinite loops)
  useEffect(() => {
    if (filters) {
      setLocalFilters(filters);
    }
  }, [filters]);
  // Update sort when props change (without causing infinite loops)
  useEffect(() => {
    if (sortOptions) {
      setLocalSort(sortOptions);
    }
  }, [sortOptions]);
  // Forward a clicked video to the parent, if a handler was supplied.
  const handleVideoClick = (video: any) => {
    if (onVideoSelect) {
      onVideoSelect(video);
    }
  };
  // Fetch the next page when more results exist and no fetch is in flight.
  const handleLoadMore = () => {
    if (hasMore && loading !== 'loading') {
      loadMore();
    }
  };
  const containerClasses = [
    'video-list',
    className,
  ].filter(Boolean).join(' ');
  // Initial load: show a centered spinner until the first page arrives.
  if (loading === 'loading' && videos.length === 0) {
    return (
      <div className={containerClasses}>
        <div className="flex items-center justify-center py-12">
          <div className="text-center">
            <div className="animate-spin rounded-full h-12 w-12 border-b-2 border-blue-500 mx-auto mb-4"></div>
            <p className="text-gray-600">Loading videos...</p>
          </div>
        </div>
      </div>
    );
  }
  // Fetch failed: show the error message with a retry button.
  if (error) {
    return (
      <div className={containerClasses}>
        <div className="flex items-center justify-center py-12">
          <div className="text-center">
            <svg className="w-12 h-12 text-red-400 mx-auto mb-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
            </svg>
            <h3 className="text-lg font-medium text-gray-900 mb-2">Error Loading Videos</h3>
            <p className="text-gray-600 mb-4">{error.message}</p>
            <button
              onClick={refetch}
              className="inline-flex items-center px-4 py-2 border border-transparent text-sm font-medium rounded-md text-white bg-blue-600 hover:bg-blue-700 focus:outline-none focus:ring-2 focus:ring-offset-2 focus:ring-blue-500"
            >
              Try Again
            </button>
          </div>
        </div>
      </div>
    );
  }
  // Loaded but empty: tell the user the current filters matched nothing.
  if (videos.length === 0) {
    return (
      <div className={containerClasses}>
        <div className="flex items-center justify-center py-12">
          <div className="text-center">
            <svg className="w-12 h-12 text-gray-400 mx-auto mb-4" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M15 10l4.553-2.276A1 1 0 0121 8.618v6.764a1 1 0 01-1.447.894L15 14M5 18h8a2 2 0 002-2V8a2 2 0 00-2-2H5a2 2 0 00-2 2v8a2 2 0 002 2z" />
            </svg>
            <h3 className="text-lg font-medium text-gray-900 mb-2">No Videos Found</h3>
            <p className="text-gray-600">No videos match your current filters.</p>
          </div>
        </div>
      </div>
    );
  }
  return (
    <div className={containerClasses}>
      {/* Top Pagination */}
      {totalPages > 1 && (
        <div className="mb-6 flex flex-col sm:flex-row sm:items-center sm:justify-between gap-4 p-4 bg-white rounded-xl border border-gray-200 shadow-theme-sm">
          {/* Page Info */}
          <PageInfo
            currentPage={currentPage}
            totalPages={totalPages}
            totalItems={totalCount}
            itemsPerPage={limit}
            className="text-sm text-gray-600"
          />
          {/* Pagination Controls */}
          <Pagination
            currentPage={currentPage}
            totalPages={totalPages}
            onPageChange={goToPage}
            showFirstLast={true}
            showPrevNext={true}
            maxVisiblePages={5}
            className="justify-center sm:justify-end"
          />
        </div>
      )}
      {/* Results Summary */}
      <div className="flex items-center justify-between mb-6">
        <div className="text-sm text-gray-600">
          {totalPages > 0 ? (
            <>Showing page {currentPage} of {totalPages} ({totalCount} total videos)</>
          ) : (
            <>Showing {videos.length} of {totalCount} videos</>
          )}
        </div>
        <button
          onClick={refetch}
          disabled={loading === 'loading'}
          className="inline-flex items-center px-3 py-2 text-sm font-medium transition rounded-lg border bg-white text-gray-700 border-gray-300 hover:bg-gray-50 disabled:opacity-50 disabled:cursor-not-allowed shadow-theme-xs"
        >
          <svg className="w-4 h-4 mr-1" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15" />
          </svg>
          {loading === 'loading' ? 'Refreshing...' : 'Refresh'}
        </button>
      </div>
      {/* Video Grid */}
      <div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-4 gap-6">
        {videos.map((video) => (
          <VideoCard
            key={video.file_id}
            video={video}
            onClick={onVideoSelect ? handleVideoClick : undefined}
            showMetadata={true}
          />
        ))}
      </div>
      {/* Bottom Pagination */}
      {totalPages > 1 && videos.length > 0 && (
        <div className="mt-8 flex flex-col sm:flex-row sm:items-center sm:justify-between gap-4 p-4 bg-white rounded-xl border border-gray-200 shadow-theme-sm">
          {/* Page Info */}
          <PageInfo
            currentPage={currentPage}
            totalPages={totalPages}
            totalItems={totalCount}
            itemsPerPage={limit}
            className="text-sm text-gray-600"
          />
          {/* Pagination Controls */}
          <Pagination
            currentPage={currentPage}
            totalPages={totalPages}
            onPageChange={goToPage}
            showFirstLast={true}
            showPrevNext={true}
            maxVisiblePages={5}
            className="justify-center sm:justify-end"
          />
        </div>
      )}
      {/* Loading Indicator (shown during page changes / refreshes) */}
      {loading === 'loading' && (
        <div className="flex justify-center mt-8">
          <div className="text-sm text-gray-600 flex items-center">
            <div className="animate-spin rounded-full h-4 w-4 border-b-2 border-brand-500 mr-2"></div>
            Loading videos...
          </div>
        </div>
      )}
    </div>
  );
};

View File

@@ -1,234 +0,0 @@
/**
* VideoModal Component
*
* A modal component for displaying videos in fullscreen with detailed information.
*/
import React, { useEffect } from 'react';
import { type VideoFile } from '../types';
import { VideoPlayer } from './VideoPlayer';
import { VideoDebugger } from './VideoDebugger';
import { useVideoInfo } from '../hooks/useVideoInfo';
import {
formatFileSize,
formatVideoDate,
getFormatDisplayName,
getStatusBadgeClass,
getResolutionString,
formatDuration,
isWebCompatible,
} from '../utils/videoUtils';
/** Props accepted by {@link VideoModal}. */
interface VideoModalProps {
  /** Video to display; null when nothing is selected. */
  video: VideoFile | null;
  /** Whether the modal is visible. */
  isOpen: boolean;
  /** Called on Escape, backdrop click, or the close button. */
  onClose: () => void;
}
/**
 * Fullscreen modal showing a video player alongside a sidebar of metadata
 * and streaming details fetched via `useVideoInfo`.
 */
export const VideoModal: React.FC<VideoModalProps> = ({
  video,
  isOpen,
  onClose,
}) => {
  // Only fetch details while the modal is actually open for a video.
  const { videoInfo, streamingInfo, loading, error } = useVideoInfo(
    video?.file_id || null,
    { autoFetch: isOpen && !!video }
  );
  // Handle escape key
  useEffect(() => {
    const handleEscape = (e: KeyboardEvent) => {
      if (e.key === 'Escape') {
        onClose();
      }
    };
    if (isOpen) {
      document.addEventListener('keydown', handleEscape);
      // Lock background scrolling while the modal is open.
      document.body.style.overflow = 'hidden';
    }
    return () => {
      document.removeEventListener('keydown', handleEscape);
      // NOTE(review): this resets overflow to 'unset' even when the modal was
      // never opened, which could clobber a value set elsewhere — confirm.
      document.body.style.overflow = 'unset';
    };
  }, [isOpen, onClose]);
  if (!isOpen || !video) {
    return null;
  }
  // Close only when the click lands on the backdrop itself, not modal content.
  const handleBackdropClick = (e: React.MouseEvent) => {
    if (e.target === e.currentTarget) {
      onClose();
    }
  };
  return (
    <div className="fixed inset-0 z-[999999] overflow-y-auto">
      {/* Backdrop */}
      <div
        className="fixed inset-0 bg-black bg-opacity-75 transition-opacity"
        onClick={handleBackdropClick}
      />
      {/* Modal */}
      <div className="flex min-h-full items-center justify-center p-4">
        <div className="relative bg-white rounded-lg shadow-xl max-w-6xl w-full max-h-[90vh] overflow-hidden">
          {/* Header */}
          <div className="flex items-center justify-between p-4 border-b">
            <h2 className="text-xl font-semibold text-gray-900 truncate pr-4">
              {video.filename}
            </h2>
            <button
              onClick={onClose}
              className="text-gray-400 hover:text-gray-600 focus:outline-none focus:ring-2 focus:ring-blue-500 rounded p-1"
            >
              <svg className="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M6 18L18 6M6 6l12 12" />
              </svg>
            </button>
          </div>
          {/* Content */}
          <div className="flex flex-col lg:flex-row max-h-[calc(90vh-80px)]">
            {/* Video Player */}
            <div className="flex-1 bg-black">
              <VideoPlayer
                fileId={video.file_id}
                controls={true}
                className="w-full h-full min-h-[300px] lg:min-h-[400px]"
              />
            </div>
            {/* Sidebar with Video Info */}
            <div className="w-full lg:w-80 bg-gray-50 overflow-y-auto">
              <div className="p-4 space-y-4">
                {/* Status and Format */}
                <div className="flex items-center space-x-2 flex-wrap">
                  <span className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${getStatusBadgeClass(video.status)}`}>
                    {video.status}
                  </span>
                  <span className={`inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium ${isWebCompatible(video.format)
                    ? 'bg-green-100 text-green-800'
                    : 'bg-orange-100 text-orange-800'
                    }`}>
                    {getFormatDisplayName(video.format)}
                  </span>
                  {isWebCompatible(video.format) && (
                    <span className="inline-flex items-center px-2.5 py-0.5 rounded-full text-xs font-medium bg-blue-100 text-blue-800">
                      Web Compatible
                    </span>
                  )}
                </div>
                {/* Basic Info */}
                <div className="space-y-3">
                  <div>
                    <h3 className="text-sm font-medium text-gray-900 mb-2">Basic Information</h3>
                    <dl className="space-y-2 text-sm">
                      <div className="flex justify-between">
                        <dt className="text-gray-500">Camera:</dt>
                        <dd className="text-gray-900">{video.camera_name}</dd>
                      </div>
                      <div className="flex justify-between">
                        <dt className="text-gray-500">File Size:</dt>
                        <dd className="text-gray-900">{formatFileSize(video.file_size_bytes)}</dd>
                      </div>
                      <div className="flex justify-between">
                        <dt className="text-gray-500">Created:</dt>
                        <dd className="text-gray-900">{formatVideoDate(video.created_at)}</dd>
                      </div>
                      <div className="flex justify-between">
                        <dt className="text-gray-500">Streamable:</dt>
                        <dd className="text-gray-900">{video.is_streamable ? 'Yes' : 'No'}</dd>
                      </div>
                    </dl>
                  </div>
                  {/* Video Metadata (only once useVideoInfo has resolved) */}
                  {videoInfo?.metadata && (
                    <div>
                      <h3 className="text-sm font-medium text-gray-900 mb-2">Video Details</h3>
                      <dl className="space-y-2 text-sm">
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Duration:</dt>
                          <dd className="text-gray-900">{formatDuration(videoInfo.metadata.duration_seconds)}</dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Resolution:</dt>
                          <dd className="text-gray-900">
                            {getResolutionString(videoInfo.metadata.width, videoInfo.metadata.height)}
                          </dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Frame Rate:</dt>
                          <dd className="text-gray-900">{videoInfo.metadata.fps} fps</dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Codec:</dt>
                          <dd className="text-gray-900">{videoInfo.metadata.codec}</dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Aspect Ratio:</dt>
                          <dd className="text-gray-900">{videoInfo.metadata.aspect_ratio.toFixed(2)}</dd>
                        </div>
                      </dl>
                    </div>
                  )}
                  {/* Streaming Info */}
                  {streamingInfo && (
                    <div>
                      <h3 className="text-sm font-medium text-gray-900 mb-2">Streaming Details</h3>
                      <dl className="space-y-2 text-sm">
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Content Type:</dt>
                          <dd className="text-gray-900">{streamingInfo.content_type}</dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Range Requests:</dt>
                          <dd className="text-gray-900">{streamingInfo.supports_range_requests ? 'Supported' : 'Not Supported'}</dd>
                        </div>
                        <div className="flex justify-between">
                          <dt className="text-gray-500">Chunk Size:</dt>
                          <dd className="text-gray-900">{formatFileSize(streamingInfo.chunk_size_bytes)}</dd>
                        </div>
                      </dl>
                    </div>
                  )}
                  {/* Loading State */}
                  {loading === 'loading' && (
                    <div className="flex items-center justify-center py-4">
                      <div className="animate-spin rounded-full h-6 w-6 border-b-2 border-blue-500"></div>
                      <span className="ml-2 text-sm text-gray-600">Loading video details...</span>
                    </div>
                  )}
                  {/* Error State */}
                  {error && (
                    <div className="bg-red-50 border border-red-200 rounded-md p-3">
                      <div className="flex">
                        <svg className="w-5 h-5 text-red-400" fill="currentColor" viewBox="0 0 20 20">
                          <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM8.707 7.293a1 1 0 00-1.414 1.414L8.586 10l-1.293 1.293a1 1 0 101.414 1.414L10 11.414l1.293 1.293a1 1 0 001.414-1.414L11.414 10l1.293-1.293a1 1 0 00-1.414-1.414L10 8.586 8.707 7.293z" clipRule="evenodd" />
                        </svg>
                        <div className="ml-3">
                          <h3 className="text-sm font-medium text-red-800">Error loading video details</h3>
                          <p className="text-sm text-red-700 mt-1">{error.message}</p>
                        </div>
                      </div>
                    </div>
                  )}
                  {/* Video Debugger (development only) */}
                  <VideoDebugger fileId={video.file_id} />
                </div>
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  );
};

View File

@@ -1,242 +0,0 @@
/**
* VideoPlayer Component
*
* A reusable video player component with full controls and customization options.
* Uses the useVideoPlayer hook for state management and provides a clean interface.
*/
import React, { forwardRef, useState, useEffect } from 'react';
import { useVideoPlayer } from '../hooks/useVideoPlayer';
import { videoApiService } from '../services/videoApi';
import { type VideoPlayerProps } from '../types';
import { formatDuration, getVideoMimeType } from '../utils/videoUtils';
/**
 * Video player with custom controls (play/pause, seek bar, skip, volume,
 * fullscreen) driven by the `useVideoPlayer` hook. Forwards a ref to the
 * underlying <video> element for external control.
 */
export const VideoPlayer = forwardRef<HTMLVideoElement, VideoPlayerProps>(({
  fileId,
  autoPlay = false,
  controls = true,
  width = '100%',
  height = 'auto',
  className = '',
  onPlay,
  onPause,
  onEnded,
  onError,
}, forwardedRef) => {
  // MIME type / streamability for the current file; defaults to MP4 until fetched.
  const [videoInfo, setVideoInfo] = useState<{ filename?: string; mimeType: string; isStreamable?: boolean }>({
    mimeType: 'video/mp4' // Default to MP4
  });
  const { state, actions, ref } = useVideoPlayer({
    autoPlay,
    onPlay,
    onPause,
    onEnded,
    onError,
  });
  // Combine refs: expose the hook's internal <video> ref through the forwarded ref.
  React.useImperativeHandle(forwardedRef, () => ref.current!, [ref]);
  const streamingUrl = videoApiService.getStreamingUrl(fileId);
  // Fetch video info to determine MIME type and streamability
  useEffect(() => {
    const fetchVideoInfo = async () => {
      try {
        const info = await videoApiService.getVideoInfo(fileId);
        if (info.file_id) {
          // Extract filename from file_id or use a default pattern
          const filename = info.file_id.includes('.') ? info.file_id : `${info.file_id}.mp4`;
          const mimeType = getVideoMimeType(filename);
          setVideoInfo({
            filename,
            mimeType,
            isStreamable: info.is_streamable
          });
        }
      } catch (error) {
        console.warn('Could not fetch video info, using default MIME type:', error);
        // Keep default MP4 MIME type, assume not streamable
        setVideoInfo(prev => ({ ...prev, isStreamable: false }));
      }
    };
    fetchVideoInfo();
  }, [fileId]);
  // Translate a click on the progress bar into a seek position.
  const handleSeek = (e: React.MouseEvent<HTMLDivElement>) => {
    if (!ref.current) return;
    const rect = e.currentTarget.getBoundingClientRect();
    const clickX = e.clientX - rect.left;
    const percentage = clickX / rect.width;
    const newTime = percentage * state.duration;
    actions.seek(newTime);
  };
  const handleVolumeChange = (e: React.ChangeEvent<HTMLInputElement>) => {
    actions.setVolume(parseFloat(e.target.value));
  };
  return (
    <div className={`video-player relative ${className}`} style={{ width, height }}>
      {/* Video Element */}
      {/* NOTE(review): `!controls || state.error` can evaluate to a non-boolean
          (error looks like a string) and inverts the custom-controls flag when
          falling back to native controls — confirm this is the intended logic. */}
      <video
        ref={ref}
        className="w-full h-full bg-black"
        controls={!controls || state.error} // Use native controls if custom controls are disabled or there's an error
        style={{ width, height }}
        playsInline // Important for iOS compatibility
        preload="metadata" // Load metadata first for better UX
      >
        <source src={streamingUrl} type={videoInfo.mimeType} />
        {/* Fallback for MP4 if original format fails */}
        {videoInfo.mimeType !== 'video/mp4' && (
          <source src={streamingUrl} type="video/mp4" />
        )}
        Your browser does not support the video tag.
      </video>
      {/* Loading Overlay */}
      {state.isLoading && (
        <div className="absolute inset-0 bg-black bg-opacity-50 flex items-center justify-center">
          <div className="text-white text-center">
            <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-white mx-auto mb-2"></div>
            <div className="text-lg">Loading video...</div>
          </div>
        </div>
      )}
      {/* Error Overlay */}
      {state.error && (
        <div className="absolute inset-0 bg-black bg-opacity-75 flex items-center justify-center">
          <div className="text-red-400 text-center">
            <div className="text-lg mb-2">Playback Error</div>
            <div className="text-sm">{state.error}</div>
          </div>
        </div>
      )}
      {/* Custom Controls */}
      {controls && (
        <div className="absolute bottom-0 left-0 right-0 bg-gradient-to-t from-black to-transparent p-4">
          {/* Progress Bar */}
          <div className="mb-3">
            <div
              className="w-full h-2 bg-gray-600 rounded cursor-pointer"
              onClick={handleSeek}
            >
              <div
                className="h-full bg-blue-500 rounded"
                style={{
                  width: `${state.duration > 0 ? (state.currentTime / state.duration) * 100 : 0}%`
                }}
              />
            </div>
          </div>
          {/* Control Bar */}
          <div className="flex items-center justify-between text-white">
            {/* Left Controls */}
            <div className="flex items-center space-x-3">
              {/* Play/Pause Button */}
              <button
                onClick={actions.togglePlay}
                className="p-2 hover:bg-white hover:bg-opacity-20 rounded"
                disabled={state.isLoading}
              >
                {state.isPlaying ? (
                  <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                    <path fillRule="evenodd" d="M18 10a8 8 0 11-16 0 8 8 0 0116 0zM7 8a1 1 0 012 0v4a1 1 0 11-2 0V8zm5-1a1 1 0 00-1 1v4a1 1 0 102 0V8a1 1 0 00-1-1z" clipRule="evenodd" />
                  </svg>
                ) : (
                  <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                    <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM9.555 7.168A1 1 0 008 8v4a1 1 0 001.555.832l3-2a1 1 0 000-1.664l-3-2z" clipRule="evenodd" />
                  </svg>
                )}
              </button>
              {/* Skip Backward */}
              <button
                onClick={() => actions.skip(-10)}
                className="p-2 hover:bg-white hover:bg-opacity-20 rounded"
                title="Skip backward 10s"
              >
                <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                  <path fillRule="evenodd" d="M15.707 15.707a1 1 0 01-1.414 0l-5-5a1 1 0 010-1.414l5-5a1 1 0 111.414 1.414L11.414 9H17a1 1 0 110 2h-5.586l3.293 3.293a1 1 0 010 1.414z" clipRule="evenodd" />
                </svg>
              </button>
              {/* Skip Forward */}
              <button
                onClick={() => actions.skip(10)}
                className="p-2 hover:bg-white hover:bg-opacity-20 rounded"
                title="Skip forward 10s"
              >
                <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                  <path fillRule="evenodd" d="M4.293 4.293a1 1 0 011.414 0l5 5a1 1 0 010 1.414l-5 5a1 1 0 01-1.414-1.414L8.586 11H3a1 1 0 110-2h5.586L4.293 5.707a1 1 0 010-1.414z" clipRule="evenodd" />
                </svg>
              </button>
              {/* Time Display */}
              <div className="text-sm">
                {formatDuration(state.currentTime)} / {formatDuration(state.duration)}
              </div>
            </div>
            {/* Right Controls */}
            <div className="flex items-center space-x-3">
              {/* Volume Control */}
              <div className="flex items-center space-x-2">
                <button
                  onClick={actions.toggleMute}
                  className="p-2 hover:bg-white hover:bg-opacity-20 rounded"
                >
                  {state.isMuted || state.volume === 0 ? (
                    <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                      <path fillRule="evenodd" d="M9.383 3.076A1 1 0 0110 4v12a1 1 0 01-1.617.776L4.83 13H2a1 1 0 01-1-1V8a1 1 0 011-1h2.83l3.553-3.776a1 1 0 011.617.776zM14.657 2.929a1 1 0 011.414 0A9.972 9.972 0 0119 10a9.972 9.972 0 01-2.929 7.071 1 1 0 11-1.414-1.414A7.971 7.971 0 0017 10c0-2.21-.894-4.208-2.343-5.657a1 1 0 010-1.414zm-2.829 2.828a1 1 0 011.415 0A5.983 5.983 0 0115 10a5.984 5.984 0 01-1.757 4.243 1 1 0 01-1.415-1.415A3.984 3.984 0 0013 10a3.983 3.983 0 00-1.172-2.828 1 1 0 010-1.415z" clipRule="evenodd" />
                    </svg>
                  ) : (
                    <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                      <path fillRule="evenodd" d="M9.383 3.076A1 1 0 0110 4v12a1 1 0 01-1.617.776L4.83 13H2a1 1 0 01-1-1V8a1 1 0 011-1h2.83l3.553-3.776a1 1 0 011.617.776zM14.657 2.929a1 1 0 011.414 0A9.972 9.972 0 0119 10a9.972 9.972 0 01-2.929 7.071 1 1 0 11-1.414-1.414A7.971 7.971 0 0017 10c0-2.21-.894-4.208-2.343-5.657a1 1 0 010-1.414zm-2.829 2.828a1 1 0 011.415 0A5.983 5.983 0 0115 10a5.984 5.984 0 01-1.757 4.243 1 1 0 01-1.415-1.415A3.984 3.984 0 0013 10a3.983 3.983 0 00-1.172-2.828 1 1 0 010-1.415z" clipRule="evenodd" />
                    </svg>
                  )}
                </button>
                <input
                  type="range"
                  min="0"
                  max="1"
                  step="0.1"
                  value={state.volume}
                  onChange={handleVolumeChange}
                  className="w-20 h-1 bg-gray-600 rounded-lg appearance-none cursor-pointer"
                />
              </div>
              {/* Fullscreen Button */}
              <button
                onClick={actions.toggleFullscreen}
                className="p-2 hover:bg-white hover:bg-opacity-20 rounded"
              >
                {state.isFullscreen ? (
                  <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                    <path fillRule="evenodd" d="M3 4a1 1 0 011-1h4a1 1 0 010 2H6.414l2.293 2.293a1 1 0 11-1.414 1.414L5 6.414V8a1 1 0 01-2 0V4zm9 1a1 1 0 010-2h4a1 1 0 011 1v4a1 1 0 01-2 0V6.414l-2.293 2.293a1 1 0 11-1.414-1.414L13.586 5H12zm-9 7a1 1 0 012 0v1.586l2.293-2.293a1 1 0 111.414 1.414L6.414 15H8a1 1 0 010 2H4a1 1 0 01-1-1v-4zm13-1a1 1 0 011 1v4a1 1 0 01-1 1h-4a1 1 0 010-2h1.586l-2.293-2.293a1 1 0 111.414-1.414L15 13.586V12a1 1 0 011-1z" clipRule="evenodd" />
                  </svg>
                ) : (
                  <svg className="w-5 h-5" fill="currentColor" viewBox="0 0 20 20">
                    <path fillRule="evenodd" d="M3 4a1 1 0 011-1h4a1 1 0 010 2H6.414l2.293 2.293a1 1 0 11-1.414 1.414L5 6.414V8a1 1 0 01-2 0V4zm9 1a1 1 0 010-2h4a1 1 0 011 1v4a1 1 0 01-2 0V6.414l-2.293 2.293a1 1 0 11-1.414-1.414L13.586 5H12zm-9 7a1 1 0 012 0v1.586l2.293-2.293a1 1 0 111.414 1.414L6.414 15H8a1 1 0 010 2H4a1 1 0 01-1-1v-4zm13-1a1 1 0 011 1v4a1 1 0 01-1 1h-4a1 1 0 010-2h1.586l-2.293-2.293a1 1 0 111.414-1.414L15 13.586V12a1 1 0 011-1z" clipRule="evenodd" />
                  </svg>
                )}
              </button>
            </div>
          </div>
        </div>
      )}
    </div>
  );
});
VideoPlayer.displayName = 'VideoPlayer';

View File

@@ -1,138 +0,0 @@
/**
* VideoThumbnail Component
*
* A reusable component for displaying video thumbnails with loading states and error handling.
*/
import React, { useState, useEffect } from 'react';
import { videoApiService } from '../services/videoApi';
import { thumbnailCache } from '../utils/thumbnailCache';
import { type VideoThumbnailProps } from '../types';
/**
 * Thumbnail for a video frame with loading/error states, a play overlay when
 * clickable, and an optional timestamp badge. Thumbnails are served from the
 * shared `thumbnailCache` when available, otherwise fetched from the API and
 * stored back into the cache.
 */
export const VideoThumbnail: React.FC<VideoThumbnailProps> = ({
  fileId,
  timestamp = 0,
  width = 320,
  height = 240,
  alt = 'Video thumbnail',
  className = '',
  onClick,
}) => {
  const [thumbnailUrl, setThumbnailUrl] = useState<string | null>(null);
  const [isLoading, setIsLoading] = useState(true);
  const [error, setError] = useState<string | null>(null);
  useEffect(() => {
    // Guard against setState after unmount while the fetch is in flight.
    let isMounted = true;
    const loadThumbnail = async () => {
      try {
        setIsLoading(true);
        setError(null);
        // Check cache first
        const cachedUrl = thumbnailCache.get(fileId, timestamp, width, height);
        if (cachedUrl && isMounted) {
          setThumbnailUrl(cachedUrl);
          setIsLoading(false);
          return;
        }
        // Fetch from API if not cached
        const blob = await videoApiService.getThumbnailBlob(fileId, {
          timestamp,
          width,
          height,
        });
        if (isMounted) {
          // Store in cache and get URL
          const url = thumbnailCache.set(fileId, timestamp, width, height, blob);
          setThumbnailUrl(url);
          setIsLoading(false);
        }
      } catch (err) {
        if (isMounted) {
          setError(err instanceof Error ? err.message : 'Failed to load thumbnail');
          setIsLoading(false);
        }
      }
    };
    loadThumbnail();
    return () => {
      isMounted = false;
      // Note: We don't revoke the URL here since it's managed by the cache
    };
  }, [fileId, timestamp, width, height]);
  // Note: URL cleanup is now handled by the thumbnail cache
  // Only forward clicks once the thumbnail has loaded successfully.
  const handleClick = () => {
    if (onClick && !isLoading && !error) {
      onClick();
    }
  };
  const containerClasses = [
    'relative overflow-hidden bg-gray-200 rounded',
    onClick && !isLoading && !error ? 'cursor-pointer hover:opacity-80 transition-opacity' : '',
    className,
  ].filter(Boolean).join(' ');
  return (
    <div
      className={containerClasses}
      style={{ width, height }}
      onClick={handleClick}
    >
      {/* Loading State */}
      {isLoading && (
        <div className="absolute inset-0 flex items-center justify-center bg-gray-100">
          <div className="animate-spin rounded-full h-8 w-8 border-b-2 border-blue-500"></div>
        </div>
      )}
      {/* Error State */}
      {error && (
        <div className="absolute inset-0 flex items-center justify-center bg-gray-100 text-gray-500 text-sm p-2 text-center">
          <div>
            <svg className="w-8 h-8 mx-auto mb-2 text-gray-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
              <path strokeLinecap="round" strokeLinejoin="round" strokeWidth={2} d="M12 8v4m0 4h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z" />
            </svg>
            <div>Failed to load thumbnail</div>
          </div>
        </div>
      )}
      {/* Thumbnail Image */}
      {thumbnailUrl && !isLoading && !error && (
        <img
          src={thumbnailUrl}
          alt={alt}
          className="w-full h-full object-cover"
          onError={() => setError('Failed to display thumbnail')}
        />
      )}
      {/* Play Overlay */}
      {onClick && !isLoading && !error && (
        <div className="absolute inset-0 flex items-center justify-center opacity-0 hover:opacity-100 transition-opacity bg-black bg-opacity-30">
          <div className="bg-white bg-opacity-90 rounded-full p-3">
            <svg className="w-6 h-6 text-gray-800" fill="currentColor" viewBox="0 0 20 20">
              <path fillRule="evenodd" d="M10 18a8 8 0 100-16 8 8 0 000 16zM9.555 7.168A1 1 0 008 8v4a1 1 0 001.555.832l3-2a1 1 0 000-1.664l-3-2z" clipRule="evenodd" />
            </svg>
          </div>
        </div>
      )}
      {/* Timestamp Badge (mm:ss) */}
      {timestamp > 0 && !isLoading && !error && (
        <div className="absolute bottom-2 right-2 bg-black bg-opacity-75 text-white text-xs px-2 py-1 rounded">
          {Math.floor(timestamp / 60)}:{(timestamp % 60).toString().padStart(2, '0')}
        </div>
      )}
    </div>
  );
};

View File

@@ -1,26 +0,0 @@
/**
 * Video streaming components — barrel file.
 *
 * Re-exports every public component (plus the commonly used prop types) so
 * consumers can import everything from a single path.
 */

// Core playback and browsing components
export { VideoPlayer } from './VideoPlayer';
export { VideoThumbnail } from './VideoThumbnail';
export { VideoCard } from './VideoCard';
export { VideoList } from './VideoList';
export { VideoModal } from './VideoModal';

// UI helpers
export { Pagination, PageInfo } from './Pagination';
export { ApiStatusIndicator } from './ApiStatusIndicator';

// Error handling and diagnostics
export { VideoErrorBoundary, withVideoErrorBoundary } from './VideoErrorBoundary';
export { PerformanceDashboard } from './PerformanceDashboard';
export { VideoDebugger } from './VideoDebugger';

// Component prop types, re-exported for convenience
export type {
  VideoPlayerProps,
  VideoThumbnailProps,
  VideoCardProps,
  VideoListProps,
  PaginationProps,
} from '../types';

View File

@@ -1,16 +0,0 @@
/**
 * Video streaming hooks — barrel file.
 *
 * Single import location for every hook in this feature, together with the
 * types most commonly passed to them.
 */
export { useVideoList, type UseVideoListReturn } from './useVideoList';
export { useVideoPlayer, type UseVideoPlayerReturn, type VideoPlayerState } from './useVideoPlayer';
export { useVideoInfo, type UseVideoInfoReturn } from './useVideoInfo';

// Types frequently paired with the hooks above
export type { VideoListFilters, VideoListSortOptions } from '../types';

View File

@@ -1,191 +0,0 @@
/**
* useVideoInfo Hook
*
* Custom React hook for fetching and managing video metadata and streaming information.
*/
import { useState, useEffect, useCallback, useRef } from 'react';
import { videoApiService } from '../services/videoApi';
import {
type VideoInfoResponse,
type VideoStreamingInfo,
type VideoError,
type LoadingState
} from '../types';
/** Value returned by {@link useVideoInfo}. */
export interface UseVideoInfoReturn {
  /** Metadata for the requested video, or null before the first successful fetch. */
  videoInfo: VideoInfoResponse | null;
  /** Streaming details for the video, or null before the first successful fetch. */
  streamingInfo: VideoStreamingInfo | null;
  /** Current fetch lifecycle state. */
  loading: LoadingState;
  /** Last fetch error, or null. */
  error: VideoError | null;
  /** Re-run the fetch for the current fileId (served from cache when still valid). */
  refetch: () => Promise<void>;
  /** Drop the cached entry for the current fileId. */
  clearCache: () => void;
  /** Reset all state back to its initial values. */
  reset: () => void;
}
interface UseVideoInfoOptions {
  /** Automatically fetch when `fileId` changes. Defaults to true. */
  autoFetch?: boolean;
  /** Cache namespace so multiple consumers of the same fileId don't collide. */
  cacheKey?: string;
}
/**
 * Fetches and caches video metadata plus streaming information for one video.
 *
 * Results are cached in memory for 10 minutes per (cacheKey, fileId) pair;
 * in-flight requests are aborted when `fileId` changes or the component
 * unmounts; stale cache entries are swept on a timer.
 *
 * @param fileId - ID of the video to describe, or null to stay idle.
 * @param options - See {@link UseVideoInfoOptions}.
 * @returns See {@link UseVideoInfoReturn}.
 */
export function useVideoInfo(
  fileId: string | null,
  options: UseVideoInfoOptions = {}
): UseVideoInfoReturn {
  const { autoFetch = true, cacheKey = 'default' } = options;
  // State
  const [videoInfo, setVideoInfo] = useState<VideoInfoResponse | null>(null);
  const [streamingInfo, setStreamingInfo] = useState<VideoStreamingInfo | null>(null);
  const [loading, setLoading] = useState<LoadingState>('idle');
  const [error, setError] = useState<VideoError | null>(null);
  // Refs for cleanup and caching
  const abortControllerRef = useRef<AbortController | null>(null);
  const cacheRef = useRef<Map<string, {
    videoInfo: VideoInfoResponse;
    streamingInfo: VideoStreamingInfo;
    timestamp: number;
  }>>(new Map());
  const CACHE_DURATION = 10 * 60 * 1000; // 10 minutes
  /**
   * Check if cached data is still valid
   */
  const isCacheValid = useCallback((timestamp: number): boolean => {
    return Date.now() - timestamp < CACHE_DURATION;
  }, [CACHE_DURATION]);
  /**
   * Fetch video information and streaming info for `id`, consulting the cache first.
   */
  const fetchVideoInfo = useCallback(async (id: string): Promise<void> => {
    // Cancel any ongoing request
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
    }
    const controller = new AbortController();
    abortControllerRef.current = controller;
    try {
      setLoading('loading');
      setError(null);
      // Check cache first
      const key = `${cacheKey}_${id}`;
      const cached = cacheRef.current.get(key);
      if (cached && isCacheValid(cached.timestamp)) {
        setVideoInfo(cached.videoInfo);
        setStreamingInfo(cached.streamingInfo);
        setLoading('success');
        return;
      }
      // Fetch both video info and streaming info in parallel
      const [videoInfoResponse, streamingInfoResponse] = await Promise.all([
        videoApiService.getVideoInfo(id),
        videoApiService.getStreamingInfo(id)
      ]);
      // Check if request was aborted
      if (controller.signal.aborted) {
        return;
      }
      // Update cache
      cacheRef.current.set(key, {
        videoInfo: videoInfoResponse,
        streamingInfo: streamingInfoResponse,
        timestamp: Date.now()
      });
      // Update state
      setVideoInfo(videoInfoResponse);
      setStreamingInfo(streamingInfoResponse);
      setLoading('success');
    } catch (err) {
      // An aborted request is not an error — a newer fetch has taken over.
      if (controller.signal.aborted) {
        return;
      }
      const videoError: VideoError = err instanceof Error
        ? { code: 'FETCH_ERROR', message: err.message, details: err }
        : { code: 'UNKNOWN_ERROR', message: 'An unknown error occurred' };
      setError(videoError);
      setLoading('error');
    } finally {
      abortControllerRef.current = null;
    }
  }, [cacheKey, isCacheValid]);
  /**
   * Refetch video information
   */
  const refetch = useCallback(async (): Promise<void> => {
    if (!fileId) return;
    await fetchVideoInfo(fileId);
  }, [fileId, fetchVideoInfo]);
  /**
   * Clear cache for current video
   */
  const clearCache = useCallback((): void => {
    if (!fileId) return;
    const key = `${cacheKey}_${fileId}`;
    cacheRef.current.delete(key);
  }, [fileId, cacheKey]);
  /**
   * Reset state
   */
  const reset = useCallback((): void => {
    setVideoInfo(null);
    setStreamingInfo(null);
    setLoading('idle');
    setError(null);
  }, []);
  // Auto-fetch when fileId changes
  useEffect(() => {
    if (fileId && autoFetch) {
      fetchVideoInfo(fileId);
    } else if (!fileId) {
      reset();
    }
    // Cleanup on unmount or fileId change
    return () => {
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
      }
    };
  }, [fileId, autoFetch, fetchVideoInfo, reset]);
  // Cleanup cache periodically: sweep expired entries every CACHE_DURATION ms.
  useEffect(() => {
    const interval = setInterval(() => {
      for (const [key, value] of cacheRef.current.entries()) {
        if (!isCacheValid(value.timestamp)) {
          cacheRef.current.delete(key);
        }
      }
    }, CACHE_DURATION);
    return () => clearInterval(interval);
  }, [isCacheValid, CACHE_DURATION]);
  return {
    videoInfo,
    streamingInfo,
    loading,
    error,
    refetch,
    clearCache,
    reset,
  };
}

View File

@@ -1,262 +0,0 @@
/**
* useVideoList Hook
*
* Custom React hook for managing video list state, fetching, filtering, and pagination.
* Provides a clean interface for components to interact with video data.
*/
import { useState, useEffect, useCallback, useRef } from 'react';
import { videoApiService } from '../services/videoApi';
import {
type VideoFile,
type VideoListParams,
type VideoError,
type LoadingState,
type VideoListFilters,
type VideoListSortOptions
} from '../types';
export interface UseVideoListReturn {
  videos: VideoFile[];                 // Currently loaded page(s) of videos
  totalCount: number;                  // Total matching videos across all pages
  currentPage: number;                 // 1-based page index
  totalPages: number;
  loading: LoadingState;
  error: VideoError | null;            // Last fetch error; cleared on retry
  refetch: () => Promise<void>;        // Reload the current page
  loadMore: () => Promise<void>;       // Offset-based append (backward compatibility)
  hasMore: boolean;                    // Whether another page is available
  goToPage: (page: number) => Promise<void>;
  nextPage: () => Promise<void>;
  previousPage: () => Promise<void>;
  updateFilters: (filters: VideoListFilters) => void;      // Applies filters and reloads from page 1
  updateSort: (sortOptions: VideoListSortOptions) => void; // Sorts the loaded list client-side
  clearCache: () => void;              // Placeholder until a caching layer exists
  reset: () => void;                   // Return list state to initial values
}
import { filterVideos, sortVideos } from '../utils/videoUtils';
interface UseVideoListOptions {
  initialParams?: VideoListParams; // Base query params merged into requests
  autoFetch?: boolean;             // Fetch automatically on mount (default true)
  cacheKey?: string;               // Reserved for a future caching layer
}
/**
 * Manages a pageable, filterable list of videos fetched from the video API.
 *
 * Exposes page-based navigation (goToPage/nextPage/previousPage), the legacy
 * offset-based loadMore, client-side sorting, and filter updates.
 *
 * Changes vs. previous revision: refetch now uses the last applied params
 * instead of initialParams (it previously dropped active filters), the
 * abort-ref is only cleared by the request that owns it, and the unused
 * debounce ref / cache-duration constant were removed.
 */
export function useVideoList(options: UseVideoListOptions = {}) {
  const {
    initialParams = {},
    autoFetch = true,
    cacheKey = 'default' // currently unused; reserved for a future cache layer
  } = options;
  // List contents + request status
  const [videos, setVideos] = useState<VideoFile[]>([]);
  const [totalCount, setTotalCount] = useState(0);
  const [currentPage, setCurrentPage] = useState(1);
  const [totalPages, setTotalPages] = useState(0);
  const [loading, setLoading] = useState<LoadingState>('idle');
  const [error, setError] = useState<VideoError | null>(null);
  const [hasMore, setHasMore] = useState(true);
  // Params last sent to the API; kept so refetch/goToPage preserve filters.
  const [currentParams, setCurrentParams] = useState<VideoListParams>(initialParams);
  // Controller of the in-flight request, used to ignore stale responses.
  const abortControllerRef = useRef<AbortController | null>(null);
  /**
   * Fetch videos from the API.
   *
   * @param params query params for this request
   * @param append when true, concatenate results onto the existing list
   *
   * NOTE(review): the AbortController signal is not forwarded to the API
   * call, so "aborting" only suppresses state updates from a superseded
   * request; the network request itself still completes.
   */
  const fetchVideos = useCallback(async (
    params: VideoListParams = initialParams,
    append: boolean = false
  ): Promise<void> => {
    // Mark any previous request stale.
    if (abortControllerRef.current) {
      abortControllerRef.current.abort();
    }
    const controller = new AbortController();
    abortControllerRef.current = controller;
    try {
      setLoading('loading');
      setError(null);
      const response = await videoApiService.getVideos(params);
      if (controller.signal.aborted) {
        return;
      }
      // Either replace the list or append a further page.
      setVideos(append ? prev => [...prev, ...response.videos] : response.videos);
      setTotalCount(response.total_count);
      if (response.page && response.total_pages) {
        // Page-based pagination metadata supplied by the service layer.
        setCurrentPage(response.page);
        setTotalPages(response.total_pages);
        setHasMore(response.has_next || false);
      } else {
        // Offset-based fallback: assume more data while full pages come back.
        setHasMore(response.videos.length === (params.limit || 50));
      }
      setLoading('success');
    } catch (err) {
      if (controller.signal.aborted) {
        return;
      }
      const videoError: VideoError = err instanceof Error
        ? { code: 'FETCH_ERROR', message: err.message, details: err }
        : { code: 'UNKNOWN_ERROR', message: 'An unknown error occurred' };
      setError(videoError);
      setLoading('error');
    } finally {
      // FIX: only clear the ref when it still points at THIS controller;
      // unconditionally nulling it clobbered a newer in-flight request's ref.
      if (abortControllerRef.current === controller) {
        abortControllerRef.current = null;
      }
    }
  }, [initialParams]);
  /**
   * Refetch the current page.
   *
   * FIX: builds the request from `currentParams` (the last applied filters)
   * rather than `initialParams`, so refetch no longer silently drops filters
   * applied via updateFilters/goToPage; also removes the local variable that
   * shadowed the `currentParams` state.
   */
  const refetch = useCallback(async (): Promise<void> => {
    const params = {
      ...currentParams,
      page: currentPage,
      limit: currentParams.limit || 20,
    };
    await fetchVideos(params, false);
  }, [fetchVideos, currentParams, currentPage]);
  /**
   * Load more videos (offset pagination) — kept for backward compatibility.
   */
  const loadMore = useCallback(async (): Promise<void> => {
    if (!hasMore || loading === 'loading') {
      return;
    }
    const offset = videos.length;
    const params = { ...initialParams, offset };
    await fetchVideos(params, true);
  }, [hasMore, loading, videos.length, initialParams, fetchVideos]);
  /**
   * Go to a specific (1-based) page; ignores out-of-range or mid-load calls.
   */
  const goToPage = useCallback(async (page: number): Promise<void> => {
    if (page < 1 || (totalPages > 0 && page > totalPages) || loading === 'loading') {
      return;
    }
    const params = { ...currentParams, page, limit: currentParams.limit || 20 };
    setCurrentParams(params);
    await fetchVideos(params, false);
  }, [currentParams, totalPages, loading, fetchVideos]);
  /** Advance one page, if any remain. */
  const nextPage = useCallback(async (): Promise<void> => {
    if (currentPage < totalPages) {
      await goToPage(currentPage + 1);
    }
  }, [currentPage, totalPages, goToPage]);
  /** Go back one page, if not already on the first. */
  const previousPage = useCallback(async (): Promise<void> => {
    if (currentPage > 1) {
      await goToPage(currentPage - 1);
    }
  }, [currentPage, goToPage]);
  /**
   * Apply new filters and reload from page 1.
   */
  const updateFilters = useCallback((filters: VideoListFilters): void => {
    const newParams: VideoListParams = {
      ...initialParams,
      camera_name: filters.cameraName,
      start_date: filters.dateRange?.start,
      end_date: filters.dateRange?.end,
      page: 1, // Reset to first page when filters change
      limit: initialParams.limit || 20,
    };
    setCurrentParams(newParams);
    fetchVideos(newParams, false);
  }, [initialParams, fetchVideos]);
  /**
   * Sort the already-loaded list locally (the API exposes no sort params).
   */
  const updateSort = useCallback((sortOptions: VideoListSortOptions): void => {
    setVideos(prev => sortVideos(prev, sortOptions.field, sortOptions.direction));
  }, []);
  /**
   * Clear cache (placeholder until a caching layer exists).
   */
  const clearCache = useCallback((): void => {
    // TODO: Implement cache clearing when caching is added
    console.log('Cache cleared');
  }, []);
  /**
   * Reset list state to initial values.
   * NOTE(review): currentParams is left untouched so a later refetch keeps
   * the active filters — confirm this is the desired behavior.
   */
  const reset = useCallback((): void => {
    setVideos([]);
    setTotalCount(0);
    setCurrentPage(1);
    setTotalPages(0);
    setLoading('idle');
    setError(null);
    setHasMore(true);
  }, []);
  // Auto-fetch once on mount; mark any in-flight request stale on unmount.
  useEffect(() => {
    if (autoFetch) {
      fetchVideos(initialParams, false);
    }
    return () => {
      if (abortControllerRef.current) {
        abortControllerRef.current.abort();
      }
    };
  }, []); // Intentionally empty: run once on mount only
  return {
    videos,
    totalCount,
    currentPage,
    totalPages,
    loading,
    error,
    refetch,
    loadMore,
    hasMore,
    // Pagination methods
    goToPage,
    nextPage,
    previousPage,
    // Additional utility methods
    updateFilters,
    updateSort,
    clearCache,
    reset,
  };
}

View File

@@ -1,342 +0,0 @@
/**
* useVideoPlayer Hook
*
* Custom React hook for managing video player state and controls.
* Provides a comprehensive interface for video playback functionality.
*/
import { useState, useRef, useEffect, useCallback } from 'react';
// Video player state interface
export interface VideoPlayerState {
  isPlaying: boolean;
  currentTime: number;    // Playback position in seconds
  duration: number;       // Total length in seconds; 0 until metadata loads
  volume: number;         // 0..1
  isMuted: boolean;
  isFullscreen: boolean;
  isLoading: boolean;     // True between loadstart and loadeddata
  error: string | null;   // Last playback/load error message, or null
}
export interface UseVideoPlayerReturn {
  state: VideoPlayerState;                    // Mirrors the <video> element's state
  actions: {
    play: () => void;
    pause: () => void;
    togglePlay: () => void;
    seek: (time: number) => void;             // Absolute seek; clamped to [0, duration]
    setVolume: (volume: number) => void;      // Clamped to [0, 1]
    toggleMute: () => void;
    toggleFullscreen: () => void;
    skip: (seconds: number) => void;          // Relative seek; negative rewinds
    setPlaybackRate: (rate: number) => void;  // Clamped to [0.25, 4]
    reset: () => void;                        // Seek to 0 and pause
  };
  ref: React.RefObject<HTMLVideoElement>;     // Attach to the rendered <video> element
}
interface UseVideoPlayerOptions {
  autoPlay?: boolean;  // Applied to the element when the effect wires it up
  loop?: boolean;
  muted?: boolean;
  volume?: number;     // Initial volume, 0..1
  onPlay?: () => void;
  onPause?: () => void;
  onEnded?: () => void;
  onError?: (error: string) => void;             // Human-readable message
  onTimeUpdate?: (currentTime: number) => void;  // Seconds
  onDurationChange?: (duration: number) => void; // Seconds
}
/**
 * Manages playback state and controls for an HTML <video> element.
 *
 * Attach the returned `ref` to the element; `state` mirrors the element's
 * status for declarative rendering and `actions` wrap the imperative API.
 *
 * Fix vs. previous revision: the 30-second slow-load watchdog timer is kept
 * in the effect's closure instead of being stashed on the element via
 * `(video as any)._loadTimeout`, and it is now cleared in the effect cleanup
 * (it previously leaked when the component unmounted mid-load).
 */
export function useVideoPlayer(options: UseVideoPlayerOptions = {}) {
  const {
    autoPlay = false,
    loop = false,
    muted = false,
    volume = 1,
    onPlay,
    onPause,
    onEnded,
    onError,
    onTimeUpdate,
    onDurationChange,
  } = options;
  // Video element ref
  const videoRef = useRef<HTMLVideoElement>(null);
  // Player state
  const [state, setState] = useState<VideoPlayerState>({
    isPlaying: false,
    currentTime: 0,
    duration: 0,
    volume: volume,
    isMuted: muted,
    isFullscreen: false,
    isLoading: false,
    error: null,
  });
  /**
   * Merge a partial update into the player state.
   */
  const updateState = useCallback((updates: Partial<VideoPlayerState>) => {
    setState(prev => ({ ...prev, ...updates }));
  }, []);
  /**
   * Start playback; surfaces autoplay-policy/codec failures via onError.
   */
  const play = useCallback(async () => {
    const video = videoRef.current;
    if (!video) return;
    try {
      updateState({ isLoading: true, error: null });
      await video.play();
      updateState({ isPlaying: true, isLoading: false });
      onPlay?.();
    } catch (error) {
      const errorMessage = error instanceof Error ? error.message : 'Failed to play video';
      updateState({ isLoading: false, error: errorMessage });
      onError?.(errorMessage);
    }
  }, [updateState, onPlay, onError]);
  /**
   * Pause video
   */
  const pause = useCallback(() => {
    const video = videoRef.current;
    if (!video) return;
    video.pause();
    updateState({ isPlaying: false });
    onPause?.();
  }, [updateState, onPause]);
  /**
   * Toggle play/pause
   */
  const togglePlay = useCallback(() => {
    if (state.isPlaying) {
      pause();
    } else {
      play();
    }
  }, [state.isPlaying, play, pause]);
  /**
   * Seek to a specific time in seconds, clamped to [0, duration].
   */
  const seek = useCallback((time: number) => {
    const video = videoRef.current;
    if (!video) return;
    video.currentTime = Math.max(0, Math.min(time, video.duration || 0));
  }, []);
  /**
   * Set volume, clamped to [0, 1].
   */
  const setVolume = useCallback((newVolume: number) => {
    const video = videoRef.current;
    if (!video) return;
    const clampedVolume = Math.max(0, Math.min(1, newVolume));
    video.volume = clampedVolume;
    updateState({ volume: clampedVolume });
  }, [updateState]);
  /**
   * Toggle mute
   */
  const toggleMute = useCallback(() => {
    const video = videoRef.current;
    if (!video) return;
    video.muted = !video.muted;
    updateState({ isMuted: video.muted });
  }, [updateState]);
  /**
   * Enter/exit fullscreen; failures are logged, not surfaced as errors.
   */
  const toggleFullscreen = useCallback(async () => {
    const video = videoRef.current;
    if (!video) return;
    try {
      if (!document.fullscreenElement) {
        await video.requestFullscreen();
        updateState({ isFullscreen: true });
      } else {
        await document.exitFullscreen();
        updateState({ isFullscreen: false });
      }
    } catch (error) {
      console.warn('Fullscreen not supported or failed:', error);
    }
  }, [updateState]);
  /**
   * Skip forward/backward by a relative number of seconds.
   */
  const skip = useCallback((seconds: number) => {
    const video = videoRef.current;
    if (!video) return;
    const newTime = video.currentTime + seconds;
    seek(newTime);
  }, [seek]);
  /**
   * Set playback rate, clamped to [0.25, 4].
   */
  const setPlaybackRate = useCallback((rate: number) => {
    const video = videoRef.current;
    if (!video) return;
    video.playbackRate = Math.max(0.25, Math.min(4, rate));
  }, []);
  /**
   * Reset video to the beginning and pause.
   */
  const reset = useCallback(() => {
    const video = videoRef.current;
    if (!video) return;
    video.currentTime = 0;
    pause();
  }, [pause]);
  // Wire up media/fullscreen events so `state` tracks the element.
  useEffect(() => {
    const video = videoRef.current;
    if (!video) return;
    // Slow-load watchdog handle, scoped to this effect run (FIX: was stored
    // on the element as `(video as any)._loadTimeout`, an `as any` escape
    // hatch that also leaked the timer on unmount).
    let loadTimeout: ReturnType<typeof setTimeout> | null = null;
    const clearLoadTimeout = () => {
      if (loadTimeout !== null) {
        clearTimeout(loadTimeout);
        loadTimeout = null;
      }
    };
    const handleLoadStart = () => {
      updateState({ isLoading: true, error: null });
      clearLoadTimeout();
      // Report an error if no frame data has arrived after 30 seconds.
      loadTimeout = setTimeout(() => {
        if (video.readyState < 2) { // HAVE_CURRENT_DATA
          updateState({
            isLoading: false,
            error: 'Video loading timeout. The video may not be accessible or there may be a network issue.'
          });
        }
      }, 30000); // 30 second timeout
    };
    const handleLoadedData = () => {
      updateState({ isLoading: false });
      clearLoadTimeout();
    };
    const handleTimeUpdate = () => {
      updateState({ currentTime: video.currentTime });
      onTimeUpdate?.(video.currentTime);
    };
    const handleDurationChange = () => {
      updateState({ duration: video.duration });
      onDurationChange?.(video.duration);
    };
    const handlePlay = () => {
      updateState({ isPlaying: true });
    };
    const handlePause = () => {
      updateState({ isPlaying: false });
    };
    const handleEnded = () => {
      updateState({ isPlaying: false });
      onEnded?.();
    };
    const handleError = () => {
      const errorMessage = video.error?.message || 'Video playback error';
      updateState({ isLoading: false, error: errorMessage, isPlaying: false });
      onError?.(errorMessage);
      clearLoadTimeout();
    };
    const handleVolumeChange = () => {
      updateState({
        volume: video.volume,
        isMuted: video.muted
      });
    };
    const handleFullscreenChange = () => {
      updateState({ isFullscreen: !!document.fullscreenElement });
    };
    // Add event listeners
    video.addEventListener('loadstart', handleLoadStart);
    video.addEventListener('loadeddata', handleLoadedData);
    video.addEventListener('timeupdate', handleTimeUpdate);
    video.addEventListener('durationchange', handleDurationChange);
    video.addEventListener('play', handlePlay);
    video.addEventListener('pause', handlePause);
    video.addEventListener('ended', handleEnded);
    video.addEventListener('error', handleError);
    video.addEventListener('volumechange', handleVolumeChange);
    document.addEventListener('fullscreenchange', handleFullscreenChange);
    // Set initial properties
    video.autoplay = autoPlay;
    video.loop = loop;
    video.muted = muted;
    video.volume = volume;
    // Cleanup
    return () => {
      clearLoadTimeout(); // FIX: the pending watchdog previously leaked here
      video.removeEventListener('loadstart', handleLoadStart);
      video.removeEventListener('loadeddata', handleLoadedData);
      video.removeEventListener('timeupdate', handleTimeUpdate);
      video.removeEventListener('durationchange', handleDurationChange);
      video.removeEventListener('play', handlePlay);
      video.removeEventListener('pause', handlePause);
      video.removeEventListener('ended', handleEnded);
      video.removeEventListener('error', handleError);
      video.removeEventListener('volumechange', handleVolumeChange);
      document.removeEventListener('fullscreenchange', handleFullscreenChange);
    };
  }, [autoPlay, loop, muted, volume, updateState, onTimeUpdate, onDurationChange, onEnded, onError]);
  return {
    state,
    actions: {
      play,
      pause,
      togglePlay,
      seek,
      setVolume,
      toggleMute,
      toggleFullscreen,
      skip,
      setPlaybackRate,
      reset,
    },
    ref: videoRef,
  };
}

View File

@@ -1,24 +0,0 @@
/**
* Video Streaming Feature - Main Export
*
* This is the main entry point for the video streaming feature.
* It exports all the public APIs that other parts of the application can use.
*/
// Components
export * from './components';
// Hooks
export * from './hooks';
// Services (the shared singleton plus the class for custom base URLs)
export { videoApiService, VideoApiService } from './services/videoApi';
// Types
export * from './types';
// Utils
export * from './utils/videoUtils';
// Main feature component (entry point rendered by the app shell)
export { VideoStreamingPage } from './VideoStreamingPage';

View File

@@ -1,282 +0,0 @@
/**
* Video Streaming API Service
*
* This service handles all API interactions for the video streaming feature.
* It provides a clean interface for components to interact with the video API
* without knowing the implementation details.
*/
import {
type VideoListResponse,
type VideoInfoResponse,
type VideoStreamingInfo,
type VideoListParams,
type ThumbnailParams,
} from '../types';
import { performanceMonitor } from '../utils/performanceMonitor';
// Configuration - Prefer env var; default to relative "/api" so Vite proxy can route to the API container
// NOTE(review): `||` (not `??`) means an explicitly-empty VITE_VISION_API_URL also falls back to '/api'.
const API_BASE_URL = import.meta.env.VITE_VISION_API_URL || '/api';
/**
 * Error type raised by the video API layer.
 *
 * Carries a machine-readable `code` (e.g. 'HTTP_404', 'NETWORK_ERROR') and
 * optional diagnostic `details` alongside the human-readable message.
 */
export class VideoApiError extends Error {
  constructor(
    public code: string,
    message: string,
    public details?: unknown
  ) {
    super(message);
    this.name = 'VideoApiError';
  }
}
/**
 * Validate an API response and parse its JSON body.
 *
 * @throws VideoApiError `HTTP_<status>` (with the response body in details)
 *         on a non-OK status, or `INVALID_RESPONSE` when the body is not JSON.
 */
async function handleApiResponse<T>(response: Response): Promise<T> {
  // Non-OK: capture the body text for diagnostics and bail out.
  if (!response.ok) {
    const errorText = await response.text();
    throw new VideoApiError(
      `HTTP_${response.status}`,
      `API request failed: ${response.statusText}`,
      { status: response.status, body: errorText }
    );
  }
  // Only JSON payloads are acceptable past this point.
  const contentType = response.headers.get('content-type');
  if (!contentType || !contentType.includes('application/json')) {
    throw new VideoApiError(
      'INVALID_RESPONSE',
      'Expected JSON response from API'
    );
  }
  return response.json() as Promise<T>;
}
/**
 * Serialize a params object into a URL query string, skipping entries whose
 * value is null or undefined. Insertion order of keys is preserved.
 */
function buildQueryString(params: VideoListParams | ThumbnailParams): string {
  const query = new URLSearchParams();
  for (const [key, value] of Object.entries(params)) {
    // `!= null` filters both null and undefined in one check.
    if (value != null) {
      query.append(key, String(value));
    }
  }
  return query.toString();
}
/**
 * Video API Service Class
 *
 * Thin wrapper over fetch() for the vision system's /videos endpoints.
 * Handles page<->offset conversion and works around the backend's unreliable
 * total_count field by re-counting results (see getTotalCount).
 */
export class VideoApiService {
  // Base URL for all endpoints; defaults to the env-configured API_BASE_URL.
  private baseUrl: string;
  constructor(baseUrl: string = API_BASE_URL) {
    this.baseUrl = baseUrl;
  }
  /**
   * Get total count of videos with filters (without pagination)
   */
  // Cache of computed totals keyed by the JSON-serialized filter params, so
  // paging through a list does not re-count on every page.
  private totalCountCache = new Map<string, { count: number; timestamp: number }>();
  private readonly CACHE_DURATION = 30000; // 30 seconds cache
  // NOTE(review): counts at most 1000 videos per filter set (the limit sent
  // below), so larger collections will under-report — confirm backend limits.
  private async getTotalCount(params: Omit<VideoListParams, 'limit' | 'offset' | 'page'>): Promise<number> {
    // Create cache key from params
    const cacheKey = JSON.stringify(params);
    const cached = this.totalCountCache.get(cacheKey);
    // Return cached result if still valid
    if (cached && Date.now() - cached.timestamp < this.CACHE_DURATION) {
      return cached.count;
    }
    const queryString = buildQueryString({ ...params, limit: 1000 }); // Use high limit to get accurate total
    const url = `${this.baseUrl}/videos/${queryString ? `?${queryString}` : ''}`;
    const response = await fetch(url, {
      method: 'GET',
      headers: {
        'Accept': 'application/json',
      },
    });
    const result = await handleApiResponse<VideoListResponse>(response);
    const count = result.videos.length; // Since backend returns wrong total_count, count the actual videos
    // Cache the result
    this.totalCountCache.set(cacheKey, { count, timestamp: Date.now() });
    return count;
  }
  /**
   * Get list of videos with optional filtering
   *
   * Accepts either offset-based params or 1-based `page` params; page/limit
   * are converted to an offset before the request, and pagination metadata
   * (page, total_pages, has_next/has_previous) is added to the response.
   */
  async getVideos(params: VideoListParams = {}): Promise<VideoListResponse> {
    return performanceMonitor.trackOperation('get_videos', async () => {
      // Convert page-based params to offset-based for API compatibility
      const apiParams = { ...params };
      // If page is provided, convert to offset
      if (params.page && params.limit) {
        apiParams.offset = (params.page - 1) * params.limit;
        delete apiParams.page; // Remove page param as API expects offset
      }
      const queryString = buildQueryString(apiParams);
      const url = `${this.baseUrl}/videos/${queryString ? `?${queryString}` : ''}`;
      const response = await fetch(url, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
        },
      });
      const result = await handleApiResponse<VideoListResponse>(response);
      // Add pagination metadata if page was requested
      if (params.page && params.limit) {
        // Get accurate total count by calling without pagination
        const totalCount = await this.getTotalCount({
          camera_name: params.camera_name,
          start_date: params.start_date,
          end_date: params.end_date,
          include_metadata: params.include_metadata,
        });
        const totalPages = Math.ceil(totalCount / params.limit);
        return {
          ...result,
          total_count: totalCount, // Use accurate total count
          page: params.page,
          total_pages: totalPages,
          has_next: params.page < totalPages,
          has_previous: params.page > 1,
        };
      }
      return result;
    }, { params });
  }
  /**
   * Get detailed information about a specific video
   *
   * @throws VideoApiError — NETWORK_ERROR when the request itself fails,
   *         or HTTP_* / INVALID_RESPONSE from handleApiResponse.
   */
  async getVideoInfo(fileId: string): Promise<VideoInfoResponse> {
    try {
      const response = await fetch(`${this.baseUrl}/videos/${fileId}`, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
        },
      });
      return await handleApiResponse<VideoInfoResponse>(response);
    } catch (error) {
      if (error instanceof VideoApiError) {
        throw error;
      }
      throw new VideoApiError(
        'NETWORK_ERROR',
        `Failed to fetch video info for ${fileId}`,
        { originalError: error, fileId }
      );
    }
  }
  /**
   * Get streaming information for a video
   *
   * Same error contract as getVideoInfo.
   */
  async getStreamingInfo(fileId: string): Promise<VideoStreamingInfo> {
    try {
      const response = await fetch(`${this.baseUrl}/videos/${fileId}/info`, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
        },
      });
      return await handleApiResponse<VideoStreamingInfo>(response);
    } catch (error) {
      if (error instanceof VideoApiError) {
        throw error;
      }
      throw new VideoApiError(
        'NETWORK_ERROR',
        `Failed to fetch streaming info for ${fileId}`,
        { originalError: error, fileId }
      );
    }
  }
  /**
   * Get the streaming URL for a video (URL construction only; no request).
   */
  getStreamingUrl(fileId: string): string {
    return `${this.baseUrl}/videos/${fileId}/stream`;
  }
  /**
   * Get the thumbnail URL for a video (URL construction only; no request).
   */
  getThumbnailUrl(fileId: string, params: ThumbnailParams = {}): string {
    const queryString = buildQueryString(params);
    return `${this.baseUrl}/videos/${fileId}/thumbnail${queryString ? `?${queryString}` : ''}`;
  }
  /**
   * Download thumbnail as blob
   *
   * @throws VideoApiError with an HTTP_* code on a non-OK response.
   */
  async getThumbnailBlob(fileId: string, params: ThumbnailParams = {}): Promise<Blob> {
    return performanceMonitor.trackOperation('get_thumbnail', async () => {
      const url = this.getThumbnailUrl(fileId, params);
      const response = await fetch(url);
      if (!response.ok) {
        throw new VideoApiError(
          `HTTP_${response.status}`,
          `Failed to fetch thumbnail: ${response.statusText}`,
          { status: response.status, fileId }
        );
      }
      return await response.blob();
    }, { fileId, params });
  }
  /**
   * Check if the video API is available
   *
   * Never throws; any network or HTTP failure is reported as `false`.
   */
  async healthCheck(): Promise<boolean> {
    try {
      const response = await fetch(`${this.baseUrl}/videos/`, {
        method: 'GET',
        headers: {
          'Accept': 'application/json',
        },
      });
      return response.ok;
    } catch {
      return false;
    }
  }
}
// Export a default instance — shared singleton used by the hooks; construct
// VideoApiService directly to target a different base URL.
export const videoApiService = new VideoApiService();
// Export utility functions
export { buildQueryString, handleApiResponse };

View File

@@ -1,163 +0,0 @@
/**
* Video Streaming Feature Types
*
* This file contains all TypeScript type definitions for the video streaming feature.
* Following the modular architecture pattern where types are centralized and reusable.
* Updated to fix import issues.
*/
// Base video information from the API
export interface VideoFile {
  file_id: string;           // Stable id used in all /videos/{id} routes
  camera_name: string;       // Source camera (filterable via VideoListParams)
  filename: string;
  file_size_bytes: number;
  format: string;            // Container format reported by the API
  status: 'completed' | 'processing' | 'failed';
  created_at: string;        // Timestamp string — presumably ISO-8601; confirm with API
  is_streamable: boolean;    // True when the file can be streamed as-is
  needs_conversion: boolean; // True when transcoding is required first
}
// Extended video information with metadata
export interface VideoWithMetadata extends VideoFile {
  metadata?: {               // Present only when include_metadata was requested
    duration_seconds: number;
    width: number;
    height: number;
    fps: number;
    codec: string;
    aspect_ratio: number;
  };
}
// API response for video list
export interface VideoListResponse {
  videos: VideoFile[];
  total_count: number;   // NOTE(review): raw backend value is unreliable; the service layer recomputes it (see VideoApiService.getTotalCount)
  page?: number;         // Pagination fields below are added client-side by VideoApiService.getVideos
  total_pages?: number;
  has_next?: boolean;
  has_previous?: boolean;
}
// API response for video info
export interface VideoInfoResponse {
  file_id: string;
  metadata: {
    duration_seconds: number;
    width: number;
    height: number;
    fps: number;
    codec: string;
    aspect_ratio: number;
  };
}
// Streaming technical information
export interface VideoStreamingInfo {
  file_id: string;
  file_size_bytes: number;
  content_type: string;             // MIME type served by the /stream endpoint
  supports_range_requests: boolean; // Whether HTTP Range requests (seeking) are supported
  chunk_size_bytes: number;
}
// Query parameters for video list API
export interface VideoListParams {
  camera_name?: string;     // Filter by source camera
  start_date?: string;      // Date-range filter (string format defined by the API)
  end_date?: string;
  limit?: number;           // Page size
  include_metadata?: boolean;
  page?: number;            // 1-based; converted to `offset` before the request
  offset?: number;
}
// Thumbnail request parameters
export interface ThumbnailParams {
  timestamp?: number;       // Frame position — presumably seconds; confirm with API
  width?: number;           // Output size in pixels
  height?: number;
}
// Video player state is now defined in useVideoPlayer hook to avoid circular imports
// Video list filter and sort options
export interface VideoListFilters {
  cameraName?: string;          // Mapped to VideoListParams.camera_name
  dateRange?: {                 // Mapped to start_date / end_date
    start: string;
    end: string;
  };
  status?: VideoFile['status']; // Not forwarded to the API by updateFilters — presumably applied client-side; confirm
  format?: string;              // Same: no matching API param visible
}
export interface VideoListSortOptions {
  // Sorting is applied client-side (the API exposes no sort parameters).
  field: 'created_at' | 'file_size_bytes' | 'camera_name' | 'filename';
  direction: 'asc' | 'desc';
}
// Component props interfaces
export interface VideoPlayerProps {
  fileId: string;                    // Video to stream (see videoApiService.getStreamingUrl)
  autoPlay?: boolean;
  controls?: boolean;
  width?: string | number;
  height?: string | number;
  className?: string;
  onPlay?: () => void;
  onPause?: () => void;
  onEnded?: () => void;
  onError?: (error: string) => void; // Receives a human-readable error message
}
export interface VideoCardProps {
  video: VideoFile;
  onClick?: (video: VideoFile) => void;
  showMetadata?: boolean;
  className?: string;
}
export interface VideoListProps {
  filters?: VideoListFilters;
  sortOptions?: VideoListSortOptions;
  limit?: number;                    // Page size
  onVideoSelect?: (video: VideoFile) => void;
  className?: string;
}
// Pagination component props
export interface PaginationProps {
  currentPage: number;               // 1-based
  totalPages: number;
  onPageChange: (page: number) => void;
  showFirstLast?: boolean;           // Show jump-to-first/last buttons
  showPrevNext?: boolean;            // Show previous/next buttons
  maxVisiblePages?: number;          // Cap on numbered page buttons rendered
  className?: string;
}
export interface VideoThumbnailProps {
  fileId: string;
  timestamp?: number;                // Frame position passed to the thumbnail endpoint
  width?: number;
  height?: number;
  alt?: string;
  className?: string;
  onClick?: () => void;
}
// Error types
export interface VideoError {
  code: string;                      // Machine-readable code, e.g. 'FETCH_ERROR', 'HTTP_404'
  message: string;                   // Human-readable description
  details?: any;                     // Original error / response payload when available
}
// Loading states
export type LoadingState = 'idle' | 'loading' | 'success' | 'error';
// Hook return types are exported from their respective hook files
// This avoids circular import issues

View File

@@ -1,9 +0,0 @@
/**
* Video Streaming Utils - Index
*
* Centralized export for all video streaming utilities.
*/
// Re-export all utilities so consumers can import from a single path.
export * from './videoUtils';
export * from './thumbnailCache';
export * from './performanceMonitor';

View File

@@ -1,197 +0,0 @@
/**
* Performance Monitor for Video Streaming
*
* Tracks and reports performance metrics for video streaming operations.
*/
// One tracked operation. endTime/duration are filled in when the operation
// finishes; success stays false until then.
interface PerformanceMetric {
  operation: string;              // Logical operation name, e.g. 'get_videos'
  startTime: number;              // performance.now() timestamp (ms)
  endTime?: number;
  duration?: number;              // endTime - startTime, in ms
  success: boolean;
  error?: string;
  metadata?: Record<string, any>; // Free-form context (fileId, params, ...)
}
// Aggregated statistics over completed metrics (durations in ms).
interface PerformanceStats {
  totalOperations: number;
  successfulOperations: number;
  failedOperations: number;
  averageDuration: number;
  minDuration: number;
  maxDuration: number;
  successRate: number;            // successfulOperations / totalOperations, 0-1
}
/**
 * Lightweight in-memory performance tracker for video streaming operations.
 *
 * Metrics live in a bounded FIFO buffer (maxMetrics); statistics are computed
 * on demand over completed metrics only.
 */
export class PerformanceMonitor {
  // Completed + in-flight metrics, oldest first; bounded by maxMetrics.
  private metrics: PerformanceMetric[] = [];
  private maxMetrics: number;
  constructor(maxMetrics: number = 1000) {
    this.maxMetrics = maxMetrics;
  }
  /**
   * Start tracking an operation and return an opaque id.
   *
   * NOTE(review): endOperation matches by operation NAME, not by this id, so
   * overlapping operations with the same name can be mis-paired; the id is
   * currently informational only — confirm before relying on concurrency.
   */
  startOperation(operation: string, metadata?: Record<string, any>): string {
    // FIX: String.prototype.substr is deprecated; slice(2, 11) yields the
    // same 9-character random fragment.
    const id = `${operation}_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
    const metric: PerformanceMetric = {
      operation,
      startTime: performance.now(),
      success: false,
      metadata,
    };
    this.metrics.push(metric);
    // Evict the oldest entry once the buffer is full.
    if (this.metrics.length > this.maxMetrics) {
      this.metrics.shift();
    }
    return id;
  }
  /**
   * Finish the most recent unfinished metric with this operation name.
   */
  endOperation(operation: string, success: boolean, error?: string): void {
    const metric = this.metrics
      .slice()
      .reverse()
      .find(m => m.operation === operation && !m.endTime);
    if (metric) {
      metric.endTime = performance.now();
      metric.duration = metric.endTime - metric.startTime;
      metric.success = success;
      metric.error = error;
    }
  }
  /**
   * Run `fn` while tracking it as `operation`; failures are recorded and
   * rethrown unchanged.
   */
  async trackOperation<T>(
    operation: string,
    fn: () => Promise<T>,
    metadata?: Record<string, any>
  ): Promise<T> {
    this.startOperation(operation, metadata);
    try {
      const result = await fn();
      this.endOperation(operation, true);
      return result;
    } catch (error) {
      this.endOperation(operation, false, error instanceof Error ? error.message : 'Unknown error');
      throw error;
    }
  }
  /**
   * Aggregate statistics over completed metrics, optionally restricted to one
   * operation. Returns all-zero stats when nothing has completed yet.
   */
  getStats(operation?: string): PerformanceStats {
    const filteredMetrics = operation
      ? this.metrics.filter(m => m.operation === operation && m.duration !== undefined)
      : this.metrics.filter(m => m.duration !== undefined);
    if (filteredMetrics.length === 0) {
      return {
        totalOperations: 0,
        successfulOperations: 0,
        failedOperations: 0,
        averageDuration: 0,
        minDuration: 0,
        maxDuration: 0,
        successRate: 0,
      };
    }
    const durations = filteredMetrics.map(m => m.duration!);
    const successfulOps = filteredMetrics.filter(m => m.success).length;
    const failedOps = filteredMetrics.length - successfulOps;
    return {
      totalOperations: filteredMetrics.length,
      successfulOperations: successfulOps,
      failedOperations: failedOps,
      averageDuration: durations.reduce((sum, d) => sum + d, 0) / durations.length,
      minDuration: Math.min(...durations),
      maxDuration: Math.max(...durations),
      successRate: successfulOps / filteredMetrics.length,
    };
  }
  /**
   * Most recent completed metrics, newest first.
   */
  getRecentMetrics(count: number = 10): PerformanceMetric[] {
    return this.metrics
      .filter(m => m.duration !== undefined)
      .slice(-count)
      .reverse();
  }
  /**
   * Drop all recorded metrics.
   */
  clear(): void {
    this.metrics = [];
  }
  /**
   * Snapshot of all metrics (shallow copy; safe for external analysis).
   */
  exportMetrics(): PerformanceMetric[] {
    return [...this.metrics];
  }
  /**
   * Human-readable per-operation summary.
   */
  getReport(): string {
    const operations = [...new Set(this.metrics.map(m => m.operation))];
    let report = 'Video Streaming Performance Report\n';
    report += '=====================================\n\n';
    for (const operation of operations) {
      const stats = this.getStats(operation);
      report += `${operation}:\n`;
      report += `  Total Operations: ${stats.totalOperations}\n`;
      report += `  Success Rate: ${(stats.successRate * 100).toFixed(1)}%\n`;
      report += `  Average Duration: ${stats.averageDuration.toFixed(2)}ms\n`;
      report += `  Min Duration: ${stats.minDuration.toFixed(2)}ms\n`;
      report += `  Max Duration: ${stats.maxDuration.toFixed(2)}ms\n\n`;
    }
    return report;
  }
}
// Create a singleton instance shared by the video API service and hooks.
export const performanceMonitor = new PerformanceMonitor();
// Helper functions for common operations — each starts a metric; pair with
// performanceMonitor.endOperation(<name>, ...) to record a duration.
export const trackVideoLoad = (fileId: string) =>
  performanceMonitor.startOperation('video_load', { fileId });
export const trackThumbnailLoad = (fileId: string, width: number, height: number) =>
  performanceMonitor.startOperation('thumbnail_load', { fileId, width, height });
export const trackApiCall = (endpoint: string) =>
  performanceMonitor.startOperation('api_call', { endpoint });
// Log performance stats periodically in development.
// NOTE(review): this interval is never cleared; acceptable for a dev-only
// logger, but it keeps the module alive for the life of the session.
if (process.env.NODE_ENV === 'development') {
  setInterval(() => {
    const stats = performanceMonitor.getStats();
    if (stats.totalOperations > 0) {
      console.log('Video Streaming Performance:', stats);
    }
  }, 60000); // Every minute
}

View File

@@ -1,224 +0,0 @@
/**
* Thumbnail Cache Utility
*
* Provides efficient caching for video thumbnails to improve performance
* and reduce API calls.
*/
// A cached thumbnail: the raw blob plus an object URL created for it.
// The URL must be revoked (URL.revokeObjectURL) when the entry is evicted.
interface CacheEntry {
  blob: Blob;
  url: string;          // Object URL pointing at `blob`
  timestamp: number;    // Insertion time (ms since epoch), for age-based expiry
  accessCount: number;  // Times served from cache — bookkeeping for eviction
  lastAccessed: number; // Last read time (ms since epoch)
}
interface ThumbnailCacheOptions {
  maxSize: number; // Maximum number of cached thumbnails
  maxAge: number; // Maximum age in milliseconds
  maxMemory: number; // Maximum memory usage in bytes
}
/**
 * In-memory cache mapping (fileId, timestamp, width, height) to a thumbnail
 * object URL. Entries expire by age and are evicted by a combined
 * access-count / recency score when size or memory limits are exceeded.
 * Every eviction path revokes the entry's object URL to release blob memory.
 */
export class ThumbnailCache {
  private cache = new Map<string, CacheEntry>();
  private options: ThumbnailCacheOptions;

  constructor(options: Partial<ThumbnailCacheOptions> = {}) {
    // `||` means an explicit 0 falls back to the default — presumably
    // intentional (0 would disable the cache anyway).
    this.options = {
      maxSize: options.maxSize || 100,
      maxAge: options.maxAge || 30 * 60 * 1000, // 30 minutes
      maxMemory: options.maxMemory || 50 * 1024 * 1024, // 50MB
    };
  }

  /**
   * Generate cache key for a thumbnail
   */
  private generateKey(fileId: string, timestamp: number, width: number, height: number): string {
    return `${fileId}_${timestamp}_${width}x${height}`;
  }

  /**
   * Get thumbnail from cache
   *
   * Returns the cached object URL, or null on miss/expiry. A hit updates the
   * entry's access statistics used by cleanup().
   */
  get(fileId: string, timestamp: number, width: number, height: number): string | null {
    const key = this.generateKey(fileId, timestamp, width, height);
    const entry = this.cache.get(key);

    if (!entry) {
      return null;
    }

    // Check if entry is expired (age is measured from creation, not last access)
    const now = Date.now();
    if (now - entry.timestamp > this.options.maxAge) {
      this.delete(key);
      return null;
    }

    // Update access statistics
    entry.accessCount++;
    entry.lastAccessed = now;

    return entry.url;
  }

  /**
   * Store thumbnail in cache
   *
   * Creates an object URL for the blob and returns it. Replacing an existing
   * entry revokes the old URL first so it cannot leak.
   */
  set(fileId: string, timestamp: number, width: number, height: number, blob: Blob): string {
    const key = this.generateKey(fileId, timestamp, width, height);
    const url = URL.createObjectURL(blob);
    const now = Date.now();

    // Clean up existing entry if it exists
    const existingEntry = this.cache.get(key);
    if (existingEntry) {
      URL.revokeObjectURL(existingEntry.url);
    }

    // Create new entry
    const entry: CacheEntry = {
      blob,
      url,
      timestamp: now,
      accessCount: 1,
      lastAccessed: now,
    };

    this.cache.set(key, entry);

    // Cleanup if necessary
    this.cleanup();

    return url;
  }

  /**
   * Delete specific entry from cache
   *
   * Revokes the entry's object URL before removal; returns false on miss.
   */
  delete(key: string): boolean {
    const entry = this.cache.get(key);
    if (entry) {
      URL.revokeObjectURL(entry.url);
      return this.cache.delete(key);
    }
    return false;
  }

  /**
   * Clear all cached thumbnails
   */
  clear(): void {
    for (const entry of this.cache.values()) {
      URL.revokeObjectURL(entry.url);
    }
    this.cache.clear();
  }

  /**
   * Get cache statistics
   *
   * Aggregates entry count, total blob bytes, and access counts.
   */
  getStats() {
    const entries = Array.from(this.cache.values());
    const totalSize = entries.reduce((sum, entry) => sum + entry.blob.size, 0);
    const totalAccess = entries.reduce((sum, entry) => sum + entry.accessCount, 0);

    return {
      size: this.cache.size,
      totalMemory: totalSize,
      totalAccess,
      averageSize: entries.length > 0 ? totalSize / entries.length : 0,
      averageAccess: entries.length > 0 ? totalAccess / entries.length : 0,
    };
  }

  /**
   * Cleanup expired and least used entries
   *
   * Two passes: drop expired entries, then — only if count or memory limits
   * are still exceeded — evict lowest-scoring entries. The score weights
   * access count heavily over recency, so frequently used thumbnails survive.
   */
  private cleanup(): void {
    const now = Date.now();
    const entries = Array.from(this.cache.entries());

    // Remove expired entries
    for (const [key, entry] of entries) {
      if (now - entry.timestamp > this.options.maxAge) {
        this.delete(key);
      }
    }

    // Check if we need to remove more entries
    if (this.cache.size <= this.options.maxSize) {
      const stats = this.getStats();
      if (stats.totalMemory <= this.options.maxMemory) {
        return; // No cleanup needed
      }
    }

    // Sort by access frequency and recency (LRU with access count)
    const sortedEntries = Array.from(this.cache.entries()).sort(([, a], [, b]) => {
      // Prioritize by access count, then by last accessed time
      const scoreA = a.accessCount * 1000 + (a.lastAccessed / 1000);
      const scoreB = b.accessCount * 1000 + (b.lastAccessed / 1000);
      return scoreA - scoreB; // Ascending order (least valuable first)
    });

    // Remove least valuable entries until we're under limits
    // (getStats() is recomputed per iteration — O(n) per eviction, acceptable
    // at maxSize=100 but worth revisiting for larger caches)
    while (
      (this.cache.size > this.options.maxSize ||
        this.getStats().totalMemory > this.options.maxMemory) &&
      sortedEntries.length > 0
    ) {
      const [key] = sortedEntries.shift()!;
      this.delete(key);
    }
  }

  /**
   * Preload thumbnails for a list of videos
   *
   * Fetches and caches thumbnails for at most the first 10 videos, skipping
   * ones already cached. Individual failures are logged and swallowed so a
   * bad file never aborts the batch.
   */
  async preload(
    videos: Array<{ file_id: string }>,
    getThumbnailBlob: (fileId: string, params: any) => Promise<Blob>,
    options: { timestamp?: number; width?: number; height?: number } = {}
  ): Promise<void> {
    const { timestamp = 1.0, width = 320, height = 240 } = options;

    const promises = videos.slice(0, 10).map(async (video) => {
      const key = this.generateKey(video.file_id, timestamp, width, height);

      // Skip if already cached (no expiry check here; get() will expire it later)
      if (this.cache.has(key)) {
        return;
      }

      try {
        const blob = await getThumbnailBlob(video.file_id, {
          timestamp,
          width,
          height,
        });
        this.set(video.file_id, timestamp, width, height, blob);
      } catch (error) {
        // Silently fail for preloading
        console.warn(`Failed to preload thumbnail for ${video.file_id}:`, error);
      }
    });

    await Promise.allSettled(promises);
  }
}
// Create a singleton instance
// Module-wide cache shared by all thumbnail consumers.
export const thumbnailCache = new ThumbnailCache({
  maxSize: 100,
  maxAge: 30 * 60 * 1000, // 30 minutes
  maxMemory: 50 * 1024 * 1024, // 50MB
});

// Cleanup on page unload
// Revokes every object URL so the browser can release the blob memory.
// The window guard keeps this module importable in non-browser contexts (SSR/tests).
if (typeof window !== 'undefined') {
  window.addEventListener('beforeunload', () => {
    thumbnailCache.clear();
  });
}

View File

@@ -1,295 +0,0 @@
/**
* Video Streaming Utilities
*
* Pure utility functions for video operations, formatting, and data processing.
* These functions have no side effects and can be easily tested.
* Enhanced with MP4 format support and improved file handling.
*/
import { type VideoFile, type VideoWithMetadata } from '../types';
import {
isVideoFile as isVideoFileUtil,
getVideoMimeType as getVideoMimeTypeUtil,
getVideoFormat,
isWebCompatibleFormat,
getFormatDisplayName as getFormatDisplayNameUtil
} from '../../../utils/videoFileUtils';
/**
 * Format file size in bytes to human readable format.
 *
 * @param bytes - size in bytes; non-positive or non-finite values yield '0 B'
 * @returns e.g. '1.5 KB'; sizes beyond TB are expressed in TB
 */
export function formatFileSize(bytes: number): string {
  // Guard non-positive and non-finite input: the original formula produced
  // 'NaN undefined' for negatives because Math.log(negative) is NaN.
  if (!Number.isFinite(bytes) || bytes <= 0) return '0 B';

  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB', 'TB'];
  // Clamp the unit index: for >= 1 PB the unclamped index runs past the
  // array and produced '1 undefined'. Express such sizes in TB instead.
  const i = Math.min(Math.floor(Math.log(bytes) / Math.log(k)), sizes.length - 1);

  return `${parseFloat((bytes / Math.pow(k, i)).toFixed(1))} ${sizes[i]}`;
}
/**
 * Format duration in seconds to human readable format (HH:MM:SS or MM:SS).
 * Negative or NaN input renders as '00:00'; hours are shown only when non-zero.
 */
export function formatDuration(seconds: number): string {
  if (isNaN(seconds) || seconds < 0) return '00:00';

  const total = Math.floor(seconds);
  const pad = (n: number) => n.toString().padStart(2, '0');

  const hh = Math.floor(total / 3600);
  const mm = Math.floor((total % 3600) / 60);
  const ss = total % 60;

  return hh > 0 ? `${pad(hh)}:${pad(mm)}:${pad(ss)}` : `${pad(mm)}:${pad(ss)}`;
}
/**
 * Format date string to human readable format.
 *
 * @param dateString - ISO (or other Date-parseable) date string
 * @returns the locale-formatted date, or the input unchanged if unparseable
 */
export function formatVideoDate(dateString: string): string {
  try {
    const date = new Date(dateString);
    // new Date() never throws on bad input — it yields an "Invalid Date"
    // whose getTime() is NaN. The original code therefore returned the
    // literal string "Invalid Date"; fall back to the raw input instead.
    if (Number.isNaN(date.getTime())) return dateString;
    return date.toLocaleString();
  } catch {
    return dateString;
  }
}
/**
 * Get relative time string (e.g., "2 hours ago").
 *
 * Buckets: under a minute → 'Just now'; then minutes, hours, days; anything
 * a week or older falls back to the absolute formatted date.
 *
 * @param dateString - Date-parseable string; returned unchanged if unparseable
 */
export function getRelativeTime(dateString: string): string {
  try {
    const date = new Date(dateString);
    // new Date() never throws — detect "Invalid Date" explicitly so bad
    // input returns the raw string instead of NaN-driven fallthrough text.
    if (Number.isNaN(date.getTime())) return dateString;

    const now = new Date();
    const diffMs = now.getTime() - date.getTime();
    const diffMinutes = Math.floor(diffMs / (1000 * 60));
    const diffHours = Math.floor(diffMinutes / 60);
    const diffDays = Math.floor(diffHours / 24);

    // Future dates also land here (negative diff < 1).
    if (diffMinutes < 1) return 'Just now';
    if (diffMinutes < 60) return `${diffMinutes} minute${diffMinutes === 1 ? '' : 's'} ago`;
    if (diffHours < 24) return `${diffHours} hour${diffHours === 1 ? '' : 's'} ago`;
    if (diffDays < 7) return `${diffDays} day${diffDays === 1 ? '' : 's'} ago`;

    return formatVideoDate(dateString);
  } catch {
    return dateString;
  }
}
/**
 * Check if a filename is a video file (supports MP4, AVI, and other formats)
 *
 * Thin delegation to the shared videoFileUtils helper so the feature module
 * and the rest of the app agree on supported extensions.
 */
export function isVideoFile(filename: string): boolean {
  return isVideoFileUtil(filename);
}

/**
 * Get MIME type for video file based on filename
 *
 * Delegates to the shared videoFileUtils helper.
 */
export function getVideoMimeType(filename: string): string {
  return getVideoMimeTypeUtil(filename);
}
/**
 * Extract camera name from filename if not provided.
 *
 * Filenames follow "<camera>_rest…" (e.g. "camera1_recording_20250804_143022.avi");
 * everything before the first underscore is the camera name. Returns 'Unknown'
 * when there is no underscore or the name portion is empty.
 */
export function extractCameraName(filename: string): string {
  const separator = filename.indexOf('_');
  return separator > 0 ? filename.slice(0, separator) : 'Unknown';
}
/**
 * Get video format display name
 *
 * Delegates to the shared videoFileUtils helper.
 */
export function getFormatDisplayName(format: string): string {
  return getFormatDisplayNameUtil(format);
}

/**
 * Check if video format is web-compatible
 *
 * Delegates to the shared videoFileUtils helper.
 */
export function isWebCompatible(format: string): boolean {
  return isWebCompatibleFormat(format);
}
/**
 * Get status badge color class.
 *
 * Maps a video's processing status to its Tailwind badge classes; any
 * unrecognized status falls back to the neutral gray badge.
 */
export function getStatusBadgeClass(status: VideoFile['status']): string {
  switch (status) {
    case 'completed':
      return 'bg-green-100 text-green-800';
    case 'processing':
      return 'bg-yellow-100 text-yellow-800';
    case 'failed':
      return 'bg-red-100 text-red-800';
    default:
      return 'bg-gray-100 text-gray-800';
  }
}
/**
 * Get video resolution display string.
 *
 * Well-known resolutions map to their common names (1080p, 720p, …);
 * anything else renders as "W×H". Missing dimensions yield 'Unknown'.
 */
export function getResolutionString(width?: number, height?: number): string {
  if (!width || !height) return 'Unknown';

  switch (`${width}x${height}`) {
    case '1920x1080':
      return '1080p';
    case '1280x720':
      return '720p';
    case '854x480':
      return '480p';
    case '640x360':
      return '360p';
    case '426x240':
      return '240p';
    default:
      return `${width}×${height}`;
  }
}
/**
 * Calculate aspect ratio string.
 *
 * Snaps to a common named ratio (16:9, 4:3, 21:9, 1:1) when within 0.1 of it;
 * otherwise reduces round(ratio*100):100 by their GCD. Non-positive input
 * yields 'Unknown'.
 */
export function getAspectRatioString(aspectRatio: number): string {
  if (!aspectRatio || aspectRatio <= 0) return 'Unknown';

  const known: Array<[number, string]> = [
    [16 / 9, '16:9'],
    [4 / 3, '4:3'],
    [21 / 9, '21:9'],
    [1, '1:1'],
  ];

  // Snap to the closest well-known ratio within tolerance.
  const named = known.find(([ratio]) => Math.abs(aspectRatio - ratio) < 0.1);
  if (named) return named[1];

  // Otherwise reduce the fraction round(ratio*100)/100.
  const gcd = (a: number, b: number): number => (b === 0 ? a : gcd(b, a % b));
  const numerator = Math.round(aspectRatio * 100);
  const divisor = gcd(numerator, 100);
  return `${numerator / divisor}:${100 / divisor}`;
}
/**
 * Sort videos by different criteria.
 *
 * Returns a new array (input is never mutated). Dates compare by epoch
 * milliseconds; string fields compare case-insensitively.
 *
 * @param videos - list to sort
 * @param field - which property to sort on
 * @param direction - 'asc' or 'desc' (default 'desc')
 */
export function sortVideos(
  videos: VideoFile[],
  field: 'created_at' | 'file_size_bytes' | 'camera_name' | 'filename',
  direction: 'asc' | 'desc' = 'desc'
): VideoFile[] {
  const sign = direction === 'desc' ? -1 : 1;

  // Normalize a video's sort key: epoch ms for dates, lowercase for strings.
  const keyOf = (video: VideoFile): any => {
    const raw: any = video[field];
    if (field === 'created_at') return new Date(raw).getTime();
    return typeof raw === 'string' ? raw.toLowerCase() : raw;
  };

  return [...videos].sort((a, b) => {
    const ka = keyOf(a);
    const kb = keyOf(b);
    if (ka < kb) return -sign;
    if (ka > kb) return sign;
    return 0;
  });
}
/**
 * Filter videos by criteria.
 *
 * All supplied filters must match (logical AND); omitted filters are ignored.
 * The date range is inclusive at both ends.
 */
export function filterVideos(
  videos: VideoFile[],
  filters: {
    cameraName?: string;
    status?: VideoFile['status'];
    format?: string;
    dateRange?: { start: string; end: string };
  }
): VideoFile[] {
  const { cameraName, status, format, dateRange } = filters;

  // Parse range bounds once instead of per video.
  const rangeStart = dateRange ? new Date(dateRange.start) : null;
  const rangeEnd = dateRange ? new Date(dateRange.end) : null;

  return videos.filter((video) => {
    if (cameraName && video.camera_name !== cameraName) return false;
    if (status && video.status !== status) return false;
    if (format && video.format !== format) return false;

    if (rangeStart && rangeEnd) {
      const created = new Date(video.created_at);
      if (created < rangeStart || created > rangeEnd) return false;
    }

    return true;
  });
}
/**
 * Generate a unique key for video caching.
 *
 * With no params (or an empty object) the key is just the file ID; otherwise
 * params are serialized in sorted-key order so equivalent objects always
 * produce the same key.
 */
export function generateVideoKey(fileId: string, params?: Record<string, any>): string {
  const keys = params ? Object.keys(params).sort() : [];
  if (keys.length === 0) {
    return fileId;
  }
  const query = keys.map((key) => `${key}=${params![key]}`).join('&');
  return `${fileId}?${query}`;
}
/**
 * Validate video file ID format.
 *
 * A valid ID is a non-empty string containing no path separator (guards
 * against path traversal when the ID is interpolated into URLs).
 */
export function isValidFileId(fileId: string): boolean {
  if (typeof fileId !== 'string' || fileId.length === 0) {
    return false;
  }
  return !fileId.includes('/');
}
/**
 * Get video thumbnail timestamp suggestions.
 *
 * Samples at 10%, 25%, 50%, 75% and 90% of the duration (floored to whole
 * seconds) and deduplicates: short videos collapse several fractions onto the
 * same second, and the original returned e.g. [0,0,0,0,0] for a 1s clip.
 *
 * @param duration - video length in seconds; non-positive yields [0]
 */
export function getThumbnailTimestamps(duration: number): number[] {
  if (duration <= 0) return [0];

  const fractions = [0.1, 0.25, 0.5, 0.75, 0.9];
  const stamps = fractions
    .map((f) => Math.floor(duration * f))
    .filter((t) => t >= 0 && t < duration);

  // Preserve ascending order while removing duplicates.
  return [...new Set(stamps)];
}

View File

@@ -0,0 +1,26 @@
// Toggles for optional app modules; resolved from VITE_ENABLE_* env vars below.
export type FeatureFlags = {
  enableShell: boolean
  enableVideoModule: boolean
  enableExperimentModule: boolean
  enableCameraModule: boolean
}
// Coerce an env-var-ish value to boolean: booleans pass through, strings
// accept "1"/"true"/"yes"/"on" (any case, trimmed), everything else falls back.
const toBool = (v: unknown, fallback = false): boolean => {
  if (typeof v === 'boolean') return v
  if (typeof v === 'string') {
    const normalized = v.trim().toLowerCase()
    return ['1', 'true', 'yes', 'on'].includes(normalized)
  }
  return fallback
}
// Flags resolved once at module load from Vite env vars; every flag defaults
// to off when its variable is unset (the `?? false` feeds toBool's string path
// only when the variable exists).
export const featureFlags: FeatureFlags = {
  enableShell: toBool(import.meta.env.VITE_ENABLE_SHELL ?? false),
  enableVideoModule: toBool(import.meta.env.VITE_ENABLE_VIDEO_MODULE ?? false),
  enableExperimentModule: toBool(import.meta.env.VITE_ENABLE_EXPERIMENT_MODULE ?? false),
  enableCameraModule: toBool(import.meta.env.VITE_ENABLE_CAMERA_MODULE ?? false),
}

// Convenience accessor for a single flag.
export const isFeatureEnabled = (flag: keyof FeatureFlags): boolean => featureFlags[flag]

View File

@@ -0,0 +1,14 @@
import React from 'react'
/**
 * Resolve a component either from a federated remote or the local bundle.
 *
 * When `enabled`, returns a lazy component that imports the remote module;
 * if that import rejects (remote down, network error), it falls back to the
 * local component instead of letting React.lazy's rejection crash the tree.
 * Callers must still wrap the result in <Suspense> when `enabled` is true.
 *
 * @param enabled - feature flag controlling remote loading
 * @param remoteImport - dynamic import of the federated module
 * @param localComponent - bundled fallback component
 */
export function loadRemoteComponent<T>(
  enabled: boolean,
  remoteImport: () => Promise<{ default: T }>,
  localComponent: T
): T {
  if (!enabled) {
    return localComponent
  }
  return React.lazy(() =>
    remoteImport().catch((error) => {
      // Surface the failure for debugging, but keep the UI functional.
      console.warn('Remote component failed to load; using local fallback.', error)
      return { default: localComponent }
    })
  ) as unknown as T
}

View File

@@ -0,0 +1,4 @@
// analytics module public API placeholder
export {}

View File

@@ -0,0 +1,4 @@
// camera module public API placeholder
export {}

View File

@@ -0,0 +1,4 @@
// experiments module public API placeholder
export {}

View File

@@ -0,0 +1,4 @@
// scheduling module public API placeholder
export {}

View File

@@ -0,0 +1,4 @@
// users module public API placeholder
export {}

View File

@@ -0,0 +1,4 @@
// video module public API placeholder
export {}

View File

@@ -0,0 +1,28 @@
type HttpMethod = 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE'

const BASE_URL = import.meta.env.VITE_API_BASE_URL || 'http://localhost:8000'

/**
 * Perform a JSON request against the configured API base URL.
 *
 * @param path - path appended verbatim to BASE_URL
 * @param method - HTTP verb (default GET)
 * @param body - JSON-serialized when defined (falsy values like 0/false/'' are sent)
 * @returns parsed JSON body; undefined for empty responses (e.g. 204)
 * @throws Error with the response text (or status) on any non-2xx response
 */
async function request<T>(path: string, method: HttpMethod = 'GET', body?: unknown): Promise<T> {
  const res = await fetch(`${BASE_URL}${path}`, {
    method,
    headers: {
      'Content-Type': 'application/json'
    },
    // Check against undefined, not truthiness: `body ? …` silently dropped
    // legitimate falsy payloads such as 0, false and ''.
    body: body !== undefined ? JSON.stringify(body) : undefined,
  })
  if (!res.ok) {
    const text = await res.text().catch(() => '')
    throw new Error(text || `Request failed: ${res.status}`)
  }
  // res.json() throws on an empty body (204 No Content, some DELETEs);
  // read as text and only parse when there is something to parse.
  const text = await res.text()
  return (text ? JSON.parse(text) : undefined) as T
}

export const apiClient = {
  get: <T>(path: string) => request<T>(path, 'GET'),
  post: <T>(path: string, body?: unknown) => request<T>(path, 'POST', body),
  put: <T>(path: string, body?: unknown) => request<T>(path, 'PUT', body),
  patch: <T>(path: string, body?: unknown) => request<T>(path, 'PATCH', body),
  delete: <T>(path: string) => request<T>(path, 'DELETE'),
}

View File

@@ -0,0 +1,4 @@
export { apiClient } from './client'

View File

@@ -0,0 +1,30 @@
import { apiClient } from './client'
import type { VideoListParams, VideoListResponse } from '../types/video'
const BASE = (import.meta as any).env?.VITE_VISION_API_URL || '/api'

// Build a query string ('?a=1&b=2') from defined params, or '' when empty.
// Shared by list() and thumbnailUrl(), which previously duplicated this logic.
function toQuery(params: object): string {
  const search = new URLSearchParams()
  for (const [k, v] of Object.entries(params)) {
    if (v !== undefined && v !== null) search.append(k, String(v))
  }
  const qs = search.toString()
  return qs ? `?${qs}` : ''
}

export const videoApi = {
  /** List videos, forwarding any defined filter/pagination params as query args. */
  async list(params: VideoListParams = {}): Promise<VideoListResponse> {
    return apiClient.get(`${BASE}/videos/${toQuery(params)}`)
  },
  /** URL for range-request streaming of a video file. */
  streamingUrl(fileId: string): string {
    return `${BASE}/videos/${fileId}/stream`
  },
  /** URL for a thumbnail frame; optional timestamp (seconds) and pixel size. */
  thumbnailUrl(fileId: string, params: { timestamp?: number; width?: number; height?: number } = {}): string {
    return `${BASE}/videos/${fileId}/thumbnail${toQuery(params)}`
  }
}

View File

@@ -0,0 +1,4 @@
export { supabase, userManagement, type User } from '../../lib/supabase'
export { useAuth } from '../../hooks/useAuth'

View File

@@ -0,0 +1,6 @@
export * from './types'
export * from './ui'
export * from './api'
export * from './auth'

View File

@@ -0,0 +1,6 @@
// Discriminated result wrapper: narrow on `success` before reading data/error.
export type ApiResult<T> = { success: true; data: T } | { success: false; error: string }

// Role names used for authorization checks across the app.
export type UserRole = 'admin' | 'conductor' | 'user'

View File

@@ -0,0 +1,34 @@
// A recorded video as reported by the vision API.
export interface VideoFile {
  file_id: string // opaque ID used in streaming/thumbnail URLs
  camera_name: string
  filename: string
  file_size_bytes: number
  format: string // container format, e.g. 'mp4' or 'avi'
  status: 'completed' | 'processing' | 'failed'
  created_at: string // ISO date string
  is_streamable: boolean // playable directly in the browser
  needs_conversion: boolean // must be transcoded before web playback
}

// Paged response from the video list endpoint; paging fields are optional
// because the API omits them for unpaged queries.
export interface VideoListResponse {
  videos: VideoFile[]
  total_count: number
  page?: number
  total_pages?: number
  has_next?: boolean
  has_previous?: boolean
}

// Filter/pagination options accepted by the video list endpoint;
// all fields are optional and omitted ones are not sent.
export interface VideoListParams {
  camera_name?: string
  start_date?: string
  end_date?: string
  limit?: number
  include_metadata?: boolean
  page?: number
  offset?: number
}

View File

@@ -0,0 +1,4 @@
export { PrimaryButton } from './widgets/PrimaryButton'

View File

@@ -0,0 +1,19 @@
import type { ButtonHTMLAttributes, PropsWithChildren } from 'react'
// All native <button> attributes plus a `loading` flag that disables the
// button and swaps its label for a wait message.
type PrimaryButtonProps = PropsWithChildren<ButtonHTMLAttributes<HTMLButtonElement>> & {
  loading?: boolean
}

/**
 * Indigo call-to-action button. While `loading` is true the button is
 * disabled (in addition to any caller-supplied `disabled`) and renders
 * "Please wait…" instead of its children. Extra classes are appended after
 * the defaults so callers can override styling.
 */
export function PrimaryButton({ children, loading = false, className = '', ...rest }: PrimaryButtonProps) {
  return (
    <button
      {...rest}
      className={`px-4 py-2 rounded-lg bg-indigo-600 text-white hover:bg-indigo-700 disabled:opacity-50 ${className}`}
      disabled={loading || rest.disabled}
    >
      {loading ? 'Please wait…' : children}
    </button>
  )
}

View File

@@ -1,156 +0,0 @@
/**
* Video Streaming API Test
*
* This test script verifies the video streaming functionality
* and API connectivity with the USDA Vision Camera System.
*/
import { videoApiService } from '../features/video-streaming/services/videoApi';
// Outcome of one tester step: name, pass/fail, a human-readable message,
// and optionally the raw response that produced the verdict.
export interface TestResult {
  test: string;
  success: boolean;
  message: string;
  data?: any;
}
/**
 * Manual smoke-test harness for the video streaming API. Runs a fixed
 * sequence of checks against videoApiService, logging progress to the
 * console and collecting structured TestResult entries.
 */
export class VideoStreamingTester {
  private results: TestResult[] = [];

  /**
   * Run every check in order and return the collected results.
   * Each run resets the previous results.
   */
  async runAllTests(): Promise<TestResult[]> {
    this.results = [];

    console.log('🧪 Starting Video Streaming API Tests');
    console.log('=====================================');

    await this.testApiConnectivity();
    await this.testVideoList();
    await this.testVideoInfo();
    await this.testStreamingUrls();

    console.log('\n📊 Test Results Summary:');
    console.log('========================');

    const passed = this.results.filter(r => r.success).length;
    const total = this.results.length;

    this.results.forEach(result => {
      const icon = result.success ? '✅' : '❌';
      console.log(`${icon} ${result.test}: ${result.message}`);
    });

    console.log(`\n🎯 Tests Passed: ${passed}/${total}`);

    return this.results;
  }

  /** Check the API health endpoint responds. */
  private async testApiConnectivity(): Promise<void> {
    try {
      console.log('\n🔗 Testing API Connectivity...');
      const isHealthy = await videoApiService.healthCheck();

      if (isHealthy) {
        this.addResult('API Connectivity', true, 'Successfully connected to video API');
      } else {
        this.addResult('API Connectivity', false, 'API is not responding');
      }
    } catch (error) {
      this.addResult('API Connectivity', false, `Connection failed: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  }

  /** Fetch a small video list and sanity-check the response shape. */
  private async testVideoList(): Promise<void> {
    try {
      console.log('\n📋 Testing Video List...');
      const response = await videoApiService.getVideos({
        limit: 5,
        include_metadata: true
      });

      if (response && typeof response.total_count === 'number') {
        this.addResult('Video List', true, `Found ${response.total_count} videos, retrieved ${response.videos.length} items`, response);
      } else {
        this.addResult('Video List', false, 'Invalid response format');
      }
    } catch (error) {
      this.addResult('Video List', false, `Failed to fetch videos: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  }

  /** Fetch per-file info for the first available video, if any. */
  private async testVideoInfo(): Promise<void> {
    try {
      console.log('\n📹 Testing Video Info...');

      // First get a video list to test with
      const videoList = await videoApiService.getVideos({ limit: 1 });

      if (videoList.videos.length === 0) {
        this.addResult('Video Info', false, 'No videos available to test with');
        return;
      }

      const firstVideo = videoList.videos[0];
      const videoInfo = await videoApiService.getVideoInfo(firstVideo.file_id);

      if (videoInfo && videoInfo.file_id === firstVideo.file_id) {
        this.addResult('Video Info', true, `Successfully retrieved info for ${firstVideo.file_id}`, videoInfo);
      } else {
        this.addResult('Video Info', false, 'Invalid video info response');
      }
    } catch (error) {
      this.addResult('Video Info', false, `Failed to fetch video info: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  }

  /**
   * Verify streaming/thumbnail URL generation for the first available video.
   * Only checks that URLs are produced, not that they are reachable.
   */
  private async testStreamingUrls(): Promise<void> {
    try {
      console.log('\n🎬 Testing Streaming URLs...');

      // Get a video to test with
      const videoList = await videoApiService.getVideos({ limit: 1 });

      if (videoList.videos.length === 0) {
        this.addResult('Streaming URLs', false, 'No videos available to test with');
        return;
      }

      const firstVideo = videoList.videos[0];

      // Test streaming URL generation
      const streamingUrl = videoApiService.getStreamingUrl(firstVideo.file_id);
      const thumbnailUrl = videoApiService.getThumbnailUrl(firstVideo.file_id, {
        timestamp: 1.0,
        width: 320,
        height: 240
      });

      if (streamingUrl && thumbnailUrl) {
        this.addResult('Streaming URLs', true, `Generated URLs for ${firstVideo.file_id}`, {
          streamingUrl,
          thumbnailUrl
        });
      } else {
        this.addResult('Streaming URLs', false, 'Failed to generate URLs');
      }
    } catch (error) {
      this.addResult('Streaming URLs', false, `Failed to test URLs: ${error instanceof Error ? error.message : 'Unknown error'}`);
    }
  }

  /** Append one structured result to the run's collection. */
  private addResult(test: string, success: boolean, message: string, data?: any): void {
    this.results.push({ test, success, message, data });
  }
}
// Export for use in browser console
// Attaches the tester to window so it can be run manually from devtools:
//   await runVideoStreamingTests()
// The window guard keeps this module importable outside the browser.
if (typeof window !== 'undefined') {
  (window as any).VideoStreamingTester = VideoStreamingTester;
  (window as any).runVideoStreamingTests = async () => {
    const tester = new VideoStreamingTester();
    return await tester.runAllTests();
  };
}

export default VideoStreamingTester;

View File

@@ -1,51 +0,0 @@
// Simple test file to verify vision API client functionality
// This is not a formal test suite, just a manual verification script
import { visionApi, formatBytes, formatDuration, formatUptime } from '../lib/visionApi'
// Test utility functions
console.log('Testing utility functions:')
console.log('formatBytes(1024):', formatBytes(1024)) // Should be "1 KB"
console.log('formatBytes(1048576):', formatBytes(1048576)) // Should be "1 MB"
console.log('formatDuration(65):', formatDuration(65)) // Should be "1m 5s"
console.log('formatUptime(3661):', formatUptime(3661)) // Should be "1h 1m"
// Test API endpoints (these will fail if vision system is not running)
/**
 * Manual smoke test: hit each vision API endpoint in sequence and log the
 * responses. Returns true when every call succeeds, false on the first
 * failure (which is logged). Requires the vision system to be running.
 */
export async function testVisionApi() {
  try {
    console.log('Testing vision API endpoints...')

    // Test health endpoint
    const health = await visionApi.getHealth()
    console.log('Health check:', health)

    // Test system status
    const status = await visionApi.getSystemStatus()
    console.log('System status:', status)

    // Test cameras
    const cameras = await visionApi.getCameras()
    console.log('Cameras:', cameras)

    // Test machines
    const machines = await visionApi.getMachines()
    console.log('Machines:', machines)

    // Test storage stats
    const storage = await visionApi.getStorageStats()
    console.log('Storage stats:', storage)

    // Test recordings
    const recordings = await visionApi.getRecordings()
    console.log('Recordings:', recordings)

    console.log('All API tests passed!')
    return true
  } catch (error) {
    console.error('API test failed:', error)
    return false
  }
}
// Uncomment the line below to run the test when this file is imported
// testVisionApi()

View File

@@ -1,229 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Camera Configuration Test</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
.test-section {
margin: 20px 0;
padding: 15px;
border: 1px solid #ddd;
border-radius: 5px;
}
.success {
background-color: #d4edda;
border-color: #c3e6cb;
color: #155724;
}
.error {
background-color: #f8d7da;
border-color: #f5c6cb;
color: #721c24;
}
.loading {
background-color: #d1ecf1;
border-color: #bee5eb;
color: #0c5460;
}
button {
background-color: #007bff;
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
cursor: pointer;
margin: 5px;
}
button:hover {
background-color: #0056b3;
}
button:disabled {
background-color: #6c757d;
cursor: not-allowed;
}
pre {
background-color: #f8f9fa;
padding: 10px;
border-radius: 5px;
overflow-x: auto;
white-space: pre-wrap;
}
</style>
</head>
<body>
<h1>Camera Configuration API Test</h1>
<p>This page tests the camera configuration API to verify the auto-recording fields issue is resolved.</p>
<div class="test-section">
<h3>Test Camera Configuration API</h3>
<button onclick="testCameraConfig()">Test Camera Config</button>
<button onclick="testCameraList()">Test Camera List</button>
<button onclick="testApiFixMethod()">Test API Fix Method</button>
<div id="test-results"></div>
</div>
<script src="test-api-fix.js"></script>
<script>
const API_BASE = 'http://localhost:8000'; // Change to your vision API URL if different
// Fetch the camera list from the vision API, render it into #test-results,
// and stash the camera names on window.availableCameras for the config test.
async function testCameraList() {
  const resultsDiv = document.getElementById('test-results');
  resultsDiv.innerHTML = '<div class="loading">Testing camera list...</div>';

  try {
    const response = await fetch(`${API_BASE}/cameras`);

    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }

    const cameras = await response.json();

    resultsDiv.innerHTML = `
<div class="success">
<h4>✅ Camera List Success</h4>
<p>Found ${Object.keys(cameras).length} cameras:</p>
<pre>${JSON.stringify(cameras, null, 2)}</pre>
</div>
`;

    // Store camera names for config test
    window.availableCameras = Object.keys(cameras);

  } catch (error) {
    resultsDiv.innerHTML = `
<div class="error">
<h4>❌ Camera List Failed</h4>
<p>Error: ${error.message}</p>
</div>
`;
  }
}
// Fetch the per-camera config for the first known camera and report whether
// the auto-recording fields are present. Loads the camera list first if the
// page hasn't fetched it yet.
async function testCameraConfig() {
  const resultsDiv = document.getElementById('test-results');

  // First get camera list if we don't have it
  if (!window.availableCameras) {
    await testCameraList();
    if (!window.availableCameras || window.availableCameras.length === 0) {
      return;
    }
  }

  const cameraName = window.availableCameras[0]; // Use first camera
  resultsDiv.innerHTML = `<div class="loading">Testing camera configuration for ${cameraName}...</div>`;

  try {
    const response = await fetch(`${API_BASE}/cameras/${cameraName}/config`);

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`HTTP ${response.status}: ${response.statusText}\n${errorText}`);
    }

    const config = await response.json();

    // Check if auto-recording fields are present
    // NOTE(review): uses OR, so a single present field counts as "Yes" —
    // confirm whether all three fields should be required instead.
    const hasAutoRecordingFields =
      'auto_start_recording_enabled' in config ||
      'auto_recording_max_retries' in config ||
      'auto_recording_retry_delay_seconds' in config;

    resultsDiv.innerHTML = `
<div class="success">
<h4>✅ Camera Configuration Success</h4>
<p>Camera: ${cameraName}</p>
<p>Auto-recording fields present: ${hasAutoRecordingFields ? 'Yes' : 'No'}</p>
<details>
<summary>Full Configuration</summary>
<pre>${JSON.stringify(config, null, 2)}</pre>
</details>
</div>
`;

  } catch (error) {
    resultsDiv.innerHTML = `
<div class="error">
<h4>❌ Camera Configuration Failed</h4>
<p>Camera: ${cameraName}</p>
<p>Error: ${error.message}</p>
<details>
<summary>Error Details</summary>
<pre>${error.stack || error.toString()}</pre>
</details>
</div>
`;
  }
}
// Exercise the TestVisionApiClient from test-api-fix.js (loaded via script tag):
// fetch cameras, read the first camera's config through the client, and show
// the three auto-recording fields it is expected to surface.
async function testApiFixMethod() {
  const resultsDiv = document.getElementById('test-results');
  resultsDiv.innerHTML = '<div class="loading">Testing API fix method...</div>';

  try {
    const api = new TestVisionApiClient();

    // Get cameras first
    const cameras = await api.getCameras();
    const cameraNames = Object.keys(cameras);

    if (cameraNames.length === 0) {
      throw new Error('No cameras found');
    }

    const cameraName = cameraNames[0];
    const config = await api.getCameraConfig(cameraName);

    resultsDiv.innerHTML = `
<div class="success">
<h4>✅ API Fix Method Success</h4>
<p>Camera: ${cameraName}</p>
<p>Auto-recording fields:</p>
<ul>
<li>auto_start_recording_enabled: ${config.auto_start_recording_enabled}</li>
<li>auto_recording_max_retries: ${config.auto_recording_max_retries}</li>
<li>auto_recording_retry_delay_seconds: ${config.auto_recording_retry_delay_seconds}</li>
</ul>
<details>
<summary>Full Configuration</summary>
<pre>${JSON.stringify(config, null, 2)}</pre>
</details>
</div>
`;

  } catch (error) {
    resultsDiv.innerHTML = `
<div class="error">
<h4>❌ API Fix Method Failed</h4>
<p>Error: ${error.message}</p>
</div>
`;
  }
}

// Auto-run camera list test on page load
window.addEventListener('load', () => {
  testCameraList();
});
</script>
</body>
</html>

View File

@@ -1,248 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Stop Streaming Test</title>
<style>
body {
font-family: Arial, sans-serif;
max-width: 800px;
margin: 0 auto;
padding: 20px;
}
.test-section {
margin: 20px 0;
padding: 15px;
border: 1px solid #ddd;
border-radius: 5px;
}
.success {
background-color: #d4edda;
border-color: #c3e6cb;
color: #155724;
}
.error {
background-color: #f8d7da;
border-color: #f5c6cb;
color: #721c24;
}
.loading {
background-color: #d1ecf1;
border-color: #bee5eb;
color: #0c5460;
}
button {
background-color: #007bff;
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
cursor: pointer;
margin: 5px;
}
button:hover {
background-color: #0056b3;
}
button:disabled {
background-color: #6c757d;
cursor: not-allowed;
}
.stop-btn {
background-color: #dc3545;
}
.stop-btn:hover {
background-color: #c82333;
}
input,
select {
padding: 8px;
margin: 5px;
border: 1px solid #ddd;
border-radius: 4px;
}
</style>
</head>
<body>
<h1>🛑 Stop Streaming API Test</h1>
<div class="test-section">
<h3>Test Stop Streaming Endpoint</h3>
<p>This test verifies that the stop streaming API endpoint works correctly.</p>
<div>
<label for="cameraSelect">Select Camera:</label>
<select id="cameraSelect">
<option value="">Loading cameras...</option>
</select>
<button onclick="testStopStreaming()" class="stop-btn">Stop Streaming</button>
</div>
<div id="test-results" class="test-section" style="display: none;"></div>
</div>
<div class="test-section">
<h3>Manual API Test</h3>
<p>Test the API endpoint directly:</p>
<div>
<input type="text" id="manualCamera" placeholder="Enter camera name (e.g., camera1)" value="camera1">
<button onclick="testManualStopStreaming()">Manual Stop Stream</button>
</div>
<div id="manual-results" class="test-section" style="display: none;"></div>
</div>
<script>
const API_BASE = 'http://localhost:8000';
let cameras = {};
// Load cameras on page load
window.onload = async function () {
  await loadCameras();
};

// Populate the #cameraSelect dropdown from the vision API camera list,
// caching the raw response in the module-level `cameras` variable.
// On failure the dropdown shows an error placeholder instead.
async function loadCameras() {
  try {
    const response = await fetch(`${API_BASE}/cameras`);
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }

    cameras = await response.json();
    const select = document.getElementById('cameraSelect');
    select.innerHTML = '<option value="">Select a camera...</option>';

    Object.keys(cameras).forEach(cameraName => {
      const option = document.createElement('option');
      option.value = cameraName;
      option.textContent = `${cameraName} (${cameras[cameraName].status})`;
      select.appendChild(option);
    });
  } catch (error) {
    console.error('Error loading cameras:', error);
    const select = document.getElementById('cameraSelect');
    select.innerHTML = '<option value="">Error loading cameras</option>';
  }
}
// POST /cameras/<name>/stop-stream for the dropdown-selected camera and
// render the outcome into #test-results. Treats any non-2xx as failure.
async function testStopStreaming() {
  const cameraName = document.getElementById('cameraSelect').value;
  if (!cameraName) {
    alert('Please select a camera first');
    return;
  }

  const resultsDiv = document.getElementById('test-results');
  resultsDiv.style.display = 'block';
  resultsDiv.innerHTML = '<div class="loading">Testing stop streaming...</div>';

  try {
    const response = await fetch(`${API_BASE}/cameras/${cameraName}/stop-stream`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
      }
    });

    if (!response.ok) {
      const errorText = await response.text();
      throw new Error(`HTTP ${response.status}: ${response.statusText}\n${errorText}`);
    }

    const result = await response.json();

    resultsDiv.innerHTML = `
<div class="success">
<h4>✅ Stop Streaming Success</h4>
<p>Camera: ${cameraName}</p>
<p>Success: ${result.success}</p>
<p>Message: ${result.message}</p>
<details>
<summary>Full Response</summary>
<pre>${JSON.stringify(result, null, 2)}</pre>
</details>
</div>
`;

  } catch (error) {
    resultsDiv.innerHTML = `
<div class="error">
<h4>❌ Stop Streaming Failed</h4>
<p>Camera: ${cameraName}</p>
<p>Error: ${error.message}</p>
<details>
<summary>Error Details</summary>
<pre>${error.stack || error.toString()}</pre>
</details>
</div>
`;
  }
}
/**
 * Same stop-stream call as testStopStreaming, but for a free-form camera
 * name typed into #manualCamera. Always parses the JSON body and reports
 * the raw HTTP status, so non-2xx responses are shown rather than thrown.
 */
async function testManualStopStreaming() {
    const cameraName = document.getElementById('manualCamera').value;
    if (!cameraName) {
        alert('Please enter a camera name');
        return;
    }

    const panel = document.getElementById('manual-results');
    panel.style.display = 'block';
    panel.innerHTML = '<div class="loading">Testing manual stop streaming...</div>';

    try {
        const response = await fetch(`${API_BASE}/cameras/${cameraName}/stop-stream`, {
            method: 'POST',
            headers: { 'Content-Type': 'application/json' }
        });
        const result = await response.json();
        const ok = response.ok;
        panel.innerHTML = `
            <div class="${ok ? 'success' : 'error'}">
                <h4>${ok ? '✅' : '❌'} Manual Stop Streaming ${ok ? 'Success' : 'Failed'}</h4>
                <p>Camera: ${cameraName}</p>
                <p>HTTP Status: ${response.status} ${response.statusText}</p>
                <p>Success: ${result.success}</p>
                <p>Message: ${result.message}</p>
                <details>
                    <summary>Full Response</summary>
                    <pre>${JSON.stringify(result, null, 2)}</pre>
                </details>
            </div>
        `;
    } catch (error) {
        panel.innerHTML = `
            <div class="error">
                <h4>❌ Manual Stop Streaming Failed</h4>
                <p>Camera: ${cameraName}</p>
                <p>Error: ${error.message}</p>
                <details>
                    <summary>Error Details</summary>
                    <pre>${error.stack || error.toString()}</pre>
                </details>
            </div>
        `;
    }
}
</script>
</body>
</html>

View File

@@ -1,5 +1,6 @@
import { defineConfig } from 'vite' import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react' import react from '@vitejs/plugin-react'
import federation from '@originjs/vite-plugin-federation'
import tailwindcss from '@tailwindcss/vite' import tailwindcss from '@tailwindcss/vite'
// https://vite.dev/config/ // https://vite.dev/config/
@@ -9,6 +10,22 @@ export default defineConfig({
jsxRuntime: 'automatic' jsxRuntime: 'automatic'
}), }),
tailwindcss(), tailwindcss(),
// Module Federation shell configuration: loads the videoRemote entry declared below (URL overridable via VITE_VIDEO_REMOTE_URL).
federation({
name: 'dashboard-shell',
filename: 'remoteEntry.js',
exposes: {
// You can expose shared UI/components later if needed
},
remotes: {
// Allow overriding by env; default to localhost for dev
videoRemote: process.env.VITE_VIDEO_REMOTE_URL || 'http://localhost:3001/assets/remoteEntry.js'
},
shared: {
react: { singleton: true, eager: true },
'react-dom': { singleton: true, eager: true },
},
})
], ],
server: { server: {
// Allow connections from the VM hostname and any other host/IP // Allow connections from the VM hostname and any other host/IP

14
video-remote/index.html Normal file
View File

@@ -0,0 +1,14 @@
<!doctype html>
<html>
<head>
<meta charset="UTF-8" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>video-remote</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.tsx"></script>
</body>
</html>

3985
video-remote/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

28
video-remote/package.json Normal file
View File

@@ -0,0 +1,28 @@
{
"name": "video-remote",
"private": true,
"version": "0.0.1",
"type": "module",
"scripts": {
"dev": "vite",
"build": "vite build",
"serve:dist": "serve -s dist -l 3001",
"preview": "vite preview --port 3001"
},
"dependencies": {
"react": "^19.1.0",
"react-dom": "^19.1.0"
},
"devDependencies": {
"@originjs/vite-plugin-federation": "^1.3.3",
"@vitejs/plugin-react": "^4.6.0",
"@tailwindcss/vite": "^4.1.11",
"tailwindcss": "^4.1.11",
"http-server": "^14.1.1",
"serve": "^14.2.3",
"vite": "^7.0.4",
"typescript": "~5.8.3"
}
}

20
video-remote/src/App.tsx Normal file
View File

@@ -0,0 +1,20 @@
import React from 'react'
import { useState } from 'react'
import { VideoList } from './components/VideoList'
import { VideoModal } from './components/VideoModal'
const App: React.FC = () => {
const [selected, setSelected] = useState<string | null>(null)
return (
<div className="p-6 space-y-4">
<h1 className="text-2xl font-bold">Video Library</h1>
<VideoList onSelect={(id) => setSelected(id)} />
<VideoModal fileId={selected} onClose={() => setSelected(null)} />
</div>
)
}
export default App

View File

@@ -0,0 +1,36 @@
import React, { useState } from 'react'

type Props = {
  onChange: (filters: { camera_name?: string; start_date?: string; end_date?: string }) => void
}

/**
 * Camera-name and date-range filter controls. Filters are only propagated
 * to the parent when "Apply" is pressed; "Clear" resets all inputs and
 * reports an empty filter set. Empty fields are reported as undefined.
 */
export const FiltersBar: React.FC<Props> = ({ onChange }) => {
  const [cameraName, setCameraName] = useState('')
  const [startDate, setStartDate] = useState('')
  const [endDate, setEndDate] = useState('')

  const applyFilters = () => {
    onChange({
      camera_name: cameraName || undefined,
      start_date: startDate || undefined,
      end_date: endDate || undefined,
    })
  }

  const clearFilters = () => {
    setCameraName('')
    setStartDate('')
    setEndDate('')
    onChange({})
  }

  return (
    <div className="grid grid-cols-1 md:grid-cols-4 gap-3">
      <div>
        <label className="block text-sm mb-1">Camera</label>
        <input value={cameraName} onChange={(e) => setCameraName(e.target.value)} placeholder="camera1" className="w-full px-3 py-2 border rounded" />
      </div>
      <div>
        <label className="block text-sm mb-1">Start date</label>
        <input type="date" value={startDate} onChange={(e) => setStartDate(e.target.value)} className="w-full px-3 py-2 border rounded" />
      </div>
      <div>
        <label className="block text-sm mb-1">End date</label>
        <input type="date" value={endDate} onChange={(e) => setEndDate(e.target.value)} className="w-full px-3 py-2 border rounded" />
      </div>
      <div className="flex items-end gap-2">
        <button className="px-3 py-2 border rounded" onClick={applyFilters}>Apply</button>
        <button className="px-3 py-2 border rounded" onClick={clearFilters}>Clear</button>
      </div>
    </div>
  )
}

export default FiltersBar

View File

@@ -0,0 +1,22 @@
import React from 'react'
type Props = {
page: number
totalPages: number
onChange: (page: number) => void
}
export const Pagination: React.FC<Props> = ({ page, totalPages, onChange }) => {
if (totalPages <= 1) return null
return (
<div className="flex items-center gap-2">
<button disabled={page<=1} onClick={()=>onChange(page-1)} className="px-3 py-1 border rounded disabled:opacity-50">Prev</button>
<span className="text-sm">Page {page} / {totalPages}</span>
<button disabled={page>=totalPages} onClick={()=>onChange(page+1)} className="px-3 py-1 border rounded disabled:opacity-50">Next</button>
</div>
)
}
export default Pagination

View File

@@ -0,0 +1,42 @@
import React from 'react'
import { VideoThumbnail } from './VideoThumbnail'
export type VideoFile = {
file_id: string
filename: string
camera_name: string
file_size_bytes: number
created_at: string
status: string
format: string
is_streamable?: boolean
needs_conversion?: boolean
metadata?: { duration_seconds: number; width: number; height: number; fps: number; codec: string }
}
export type VideoCardProps = {
video: VideoFile
onClick?: (video: VideoFile) => void
showMetadata?: boolean
className?: string
}
export const VideoCard: React.FC<VideoCardProps> = ({ video, onClick }) => {
return (
<div
className={`bg-white rounded-xl border border-gray-200 shadow-sm hover:shadow-md transition-shadow overflow-hidden ${onClick ? 'cursor-pointer' : ''}`}
onClick={onClick ? () => onClick(video) : undefined}
>
<VideoThumbnail fileId={video.file_id} width={640} height={360} className="w-full h-auto" alt={video.filename} />
<div className="p-4">
<div className="text-sm text-gray-500 mb-1">{video.camera_name}</div>
<div className="font-semibold text-gray-900 truncate" title={video.filename}>{video.filename}</div>
</div>
</div>
)
}
export default VideoCard

View File

@@ -0,0 +1,52 @@
import React, { useEffect, useState } from 'react'
import { fetchVideos } from '../services/video'
import { Pagination } from './Pagination'
import VideoCard from './VideoCard'
import { FiltersBar } from './FiltersBar'
type Props = { onSelect?: (fileId: string) => void }
export const VideoList: React.FC<Props> = ({ onSelect }) => {
const [videos, setVideos] = useState<any[]>([])
const [page, setPage] = useState(1)
const [totalPages, setTotalPages] = useState(1)
const [loading, setLoading] = useState(true)
const [error, setError] = useState<string | null>(null)
const [filters, setFilters] = useState<{ camera_name?: string; start_date?: string; end_date?: string }>({})
async function load(p: number, f = filters) {
setLoading(true)
setError(null)
try {
const res = await fetchVideos({ page: p, limit: 12, ...f })
setVideos(res.videos)
setTotalPages(res.total_pages || 1)
} catch (e: any) {
setError(e?.message || 'Failed to load videos')
} finally {
setLoading(false)
}
}
useEffect(() => { load(page) }, [page])
if (loading) return <div className="p-6">Loading videos</div>
if (error) return <div className="p-6 text-red-600">{error}</div>
return (
<div className="space-y-6">
<FiltersBar onChange={(f) => { setFilters(f); setPage(1); load(1, f) }} />
<div className="grid grid-cols-1 sm:grid-cols-2 md:grid-cols-3 lg:grid-cols-4 gap-6">
{videos.map(v => (
<VideoCard key={v.file_id} video={v} onClick={onSelect ? () => onSelect(v.file_id) : undefined} />
))}
</div>
<Pagination page={page} totalPages={totalPages} onChange={setPage} />
</div>
)
}
export default VideoList

View File

@@ -0,0 +1,29 @@
import React from 'react'

// Vision API base URL; falls back to the /api proxy path when unset.
const BASE = (import.meta as any).env?.VITE_VISION_API_URL || '/api'

type Props = {
  fileId: string | null
  onClose: () => void
}

/**
 * Full-screen overlay that streams the selected video from the API.
 * Renders nothing when no file is selected.
 */
export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
  if (!fileId) return null

  const streamUrl = `${BASE}/videos/${fileId}/stream`

  return (
    <div className="fixed inset-0 z-[1000] bg-black/50 flex items-center justify-center">
      <div className="bg-white rounded-lg w-full max-w-3xl overflow-hidden">
        <div className="flex items-center justify-between p-3 border-b">
          <div className="font-semibold">Video</div>
          <button onClick={onClose} className="px-2 py-1 border rounded">Close</button>
        </div>
        <div className="p-3">
          <video src={streamUrl} controls className="w-full h-auto" />
        </div>
      </div>
    </div>
  )
}

export default VideoModal

View File

@@ -0,0 +1,26 @@
import React from 'react'

// Vision API base URL; falls back to the /api proxy path when unset.
const BASE = (import.meta as any).env?.VITE_VISION_API_URL || '/api'

type Props = {
  fileId: string
  width?: number
  height?: number
  alt?: string
  className?: string
  onClick?: () => void
}

/**
 * <img> wrapper for the API's thumbnail endpoint. The requested width and
 * height are forwarded as query parameters (omitted when falsy).
 */
export const VideoThumbnail: React.FC<Props> = ({ fileId, width = 320, height = 180, alt = '', className = '', onClick }) => {
  const params = new URLSearchParams()
  if (width) params.append('width', String(width))
  if (height) params.append('height', String(height))
  const query = params.toString()
  const src = query
    ? `${BASE}/videos/${fileId}/thumbnail?${query}`
    : `${BASE}/videos/${fileId}/thumbnail`

  return <img src={src} alt={alt} width={width} height={height} className={className} onClick={onClick} />
}

export default VideoThumbnail

View File

@@ -0,0 +1,3 @@
@import "tailwindcss";

View File

@@ -0,0 +1,9 @@
import React from 'react'
import { createRoot } from 'react-dom/client'
import App from './App'
import './index.css'

// Standalone entry point for the remote: mount the app onto #root.
const container = document.getElementById('root') as HTMLElement
createRoot(container).render(<App />)

View File

@@ -0,0 +1,24 @@
type VideoListParams = { camera_name?: string; start_date?: string; end_date?: string; limit?: number; page?: number; offset?: number; include_metadata?: boolean }
type VideoFile = { file_id: string; camera_name: string; filename: string; file_size_bytes: number; format: string; status: string; created_at: string; is_streamable: boolean; needs_conversion: boolean }
type VideoListResponse = { videos: VideoFile[]; total_count: number; page?: number; total_pages?: number }

// Vision API base URL; falls back to the /api proxy path when unset.
const BASE = (import.meta as any).env?.VITE_VISION_API_URL || '/api'

/** Minimal JSON GET helper; rejects on any non-2xx status. */
async function get<T>(path: string): Promise<T> {
  const response = await fetch(path, { headers: { 'Accept': 'application/json' } })
  if (!response.ok) throw new Error(`Request failed: ${response.status}`)
  return (await response.json()) as T
}

/** List videos, forwarding only the query parameters the caller actually set. */
export async function fetchVideos(params: VideoListParams = {}): Promise<VideoListResponse> {
  const search = new URLSearchParams()
  for (const [key, value] of Object.entries(params)) {
    if (value !== undefined && value !== null) search.append(key, String(value))
  }
  const qs = search.toString()
  return get<VideoListResponse>(qs ? `${BASE}/videos/?${qs}` : `${BASE}/videos/`)
}

Some files were not shown because too many files have changed in this diff Show More