Add comprehensive tests for camera streaming, time synchronization, and auto-recording functionality

- Implemented test script for camera streaming functionality, covering API endpoints and concurrent recording.
- Created time verification script to check system time synchronization against multiple APIs.
- Developed timezone utility tests to validate timezone functions and logging.
- Added integration tests for system components, including configuration, camera discovery, and API endpoints.
- Enhanced MQTT logging and API endpoint tests for machine and MQTT status.
- Established auto-recording tests to simulate state changes and verify automatic recording behavior.
- Created simple tests for auto-recording configuration and API model validation.
Alireza Vaezi
2025-07-29 11:15:10 -04:00
parent 0c92b6c277
commit ff7cb2c8f3
48 changed files with 935 additions and 10 deletions

View File

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""
Test script to verify the frame conversion fix works correctly.
"""
import sys
import os
import numpy as np

# Add the current directory to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Add camera SDK to path
sys.path.append(os.path.join(os.path.dirname(__file__), "camera_sdk"))

try:
    import mvsdk
    print("✅ mvsdk imported successfully")
except ImportError as e:
    print(f"❌ Failed to import mvsdk: {e}")
    sys.exit(1)


def test_frame_conversion():
    """Test the frame conversion logic"""
    print("🧪 Testing frame conversion logic...")

    # Simulate frame data
    width, height = 640, 480
    frame_size = width * height * 3  # RGB

    # Create mock frame data
    mock_frame_data = np.random.randint(0, 255, frame_size, dtype=np.uint8)

    # Create a mock frame buffer (simulate memory address)
    frame_buffer = mock_frame_data.ctypes.data

    # Create mock FrameHead
    class MockFrameHead:
        def __init__(self):
            self.iWidth = width
            self.iHeight = height
            self.uBytes = frame_size

    frame_head = MockFrameHead()

    try:
        # Test the conversion logic (similar to what's in streamer.py)
        frame_data_buffer = (mvsdk.c_ubyte * frame_head.uBytes).from_address(frame_buffer)
        frame_data = np.frombuffer(frame_data_buffer, dtype=np.uint8)
        frame = frame_data.reshape((frame_head.iHeight, frame_head.iWidth, 3))

        print("✅ Frame conversion successful!")
        print(f"   Frame shape: {frame.shape}")
        print(f"   Frame dtype: {frame.dtype}")
        print(f"   Frame size: {frame.size} bytes")
        return True
    except Exception as e:
        print(f"❌ Frame conversion failed: {e}")
        return False


def main():
    print("🔧 Frame Conversion Test")
    print("=" * 40)

    success = test_frame_conversion()

    if success:
        print("\n✅ Frame conversion fix is working correctly!")
        print("📋 The streaming issue should be resolved after system restart.")
    else:
        print("\n❌ Frame conversion fix needs more work.")

    print("\n💡 To apply the fix:")
    print("1. Restart the USDA vision system")
    print("2. Test streaming again")


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,199 @@
#!/usr/bin/env python3
"""
Test script for camera streaming functionality.
This script tests the new streaming capabilities without interfering with recording.
"""
import sys
import os
import time
import requests
import threading
from datetime import datetime

# Add the current directory to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))


def test_api_endpoints():
    """Test the streaming API endpoints"""
    base_url = "http://localhost:8000"

    print("🧪 Testing Camera Streaming API Endpoints")
    print("=" * 50)

    # Test system status
    try:
        response = requests.get(f"{base_url}/system/status", timeout=5)
        if response.status_code == 200:
            print("✅ System status endpoint working")
            data = response.json()
            print(f"   System: {data.get('status', 'Unknown')}")
            print(f"   Camera Manager: {'Running' if data.get('camera_manager_running') else 'Stopped'}")
        else:
            print(f"❌ System status endpoint failed: {response.status_code}")
    except Exception as e:
        print(f"❌ System status endpoint error: {e}")

    # Test camera list
    try:
        response = requests.get(f"{base_url}/cameras", timeout=5)
        if response.status_code == 200:
            print("✅ Camera list endpoint working")
            cameras = response.json()
            print(f"   Found {len(cameras)} cameras: {list(cameras.keys())}")

            # Test streaming for each camera
            for camera_name in cameras.keys():
                test_camera_streaming(base_url, camera_name)
        else:
            print(f"❌ Camera list endpoint failed: {response.status_code}")
    except Exception as e:
        print(f"❌ Camera list endpoint error: {e}")


def test_camera_streaming(base_url, camera_name):
    """Test streaming for a specific camera"""
    print(f"\n🎥 Testing streaming for {camera_name}")
    print("-" * 30)

    # Test start streaming
    try:
        response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
        if response.status_code == 200:
            print(f"✅ Start stream endpoint working for {camera_name}")
            data = response.json()
            print(f"   Response: {data.get('message', 'No message')}")
        else:
            print(f"❌ Start stream failed for {camera_name}: {response.status_code}")
            print(f"   Error: {response.text}")
            return
    except Exception as e:
        print(f"❌ Start stream error for {camera_name}: {e}")
        return

    # Wait a moment for stream to initialize
    time.sleep(2)

    # Test stream endpoint (just check if it responds)
    try:
        response = requests.get(f"{base_url}/cameras/{camera_name}/stream", timeout=5, stream=True)
        if response.status_code == 200:
            print(f"✅ Stream endpoint responding for {camera_name}")
            print(f"   Content-Type: {response.headers.get('content-type', 'Unknown')}")

            # Read a small amount of data to verify it's working
            chunk_count = 0
            for chunk in response.iter_content(chunk_size=1024):
                chunk_count += 1
                if chunk_count >= 3:  # Read a few chunks then stop
                    break
            print(f"   Received {chunk_count} data chunks")
        else:
            print(f"❌ Stream endpoint failed for {camera_name}: {response.status_code}")
    except Exception as e:
        print(f"❌ Stream endpoint error for {camera_name}: {e}")

    # Test stop streaming
    try:
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
        if response.status_code == 200:
            print(f"✅ Stop stream endpoint working for {camera_name}")
            data = response.json()
            print(f"   Response: {data.get('message', 'No message')}")
        else:
            print(f"❌ Stop stream failed for {camera_name}: {response.status_code}")
    except Exception as e:
        print(f"❌ Stop stream error for {camera_name}: {e}")


def test_concurrent_recording_and_streaming():
    """Test that streaming doesn't interfere with recording"""
    base_url = "http://localhost:8000"

    print("\n🔄 Testing Concurrent Recording and Streaming")
    print("=" * 50)

    try:
        # Get available cameras
        response = requests.get(f"{base_url}/cameras", timeout=5)
        if response.status_code != 200:
            print("❌ Cannot get camera list for concurrent test")
            return

        cameras = response.json()
        if not cameras:
            print("❌ No cameras available for concurrent test")
            return

        camera_name = list(cameras.keys())[0]  # Use first camera
        print(f"Using camera: {camera_name}")

        # Start streaming
        print("1. Starting streaming...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
        if response.status_code != 200:
            print(f"❌ Failed to start streaming: {response.text}")
            return

        time.sleep(2)

        # Start recording
        print("2. Starting recording...")
        response = requests.post(
            f"{base_url}/cameras/{camera_name}/start-recording",
            json={"filename": "test_concurrent_recording.avi"},
            timeout=10,
        )
        if response.status_code == 200:
            print("✅ Recording started successfully while streaming")
        else:
            print(f"❌ Failed to start recording while streaming: {response.text}")

        # Let both run for a few seconds
        print("3. Running both streaming and recording for 5 seconds...")
        time.sleep(5)

        # Stop recording
        print("4. Stopping recording...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-recording", timeout=5)
        if response.status_code == 200:
            print("✅ Recording stopped successfully")
        else:
            print(f"❌ Failed to stop recording: {response.text}")

        # Stop streaming
        print("5. Stopping streaming...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
        if response.status_code == 200:
            print("✅ Streaming stopped successfully")
        else:
            print(f"❌ Failed to stop streaming: {response.text}")

        print("✅ Concurrent test completed successfully!")
    except Exception as e:
        print(f"❌ Concurrent test error: {e}")


def main():
    """Main test function"""
    print("🚀 USDA Vision Camera Streaming Test")
    print("=" * 50)
    print(f"Test started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print()

    # Wait for system to be ready
    print("⏳ Waiting for system to be ready...")
    time.sleep(3)

    # Run tests
    test_api_endpoints()
    test_concurrent_recording_and_streaming()

    print("\n" + "=" * 50)
    print("🏁 Test completed!")
    print("\n📋 Next Steps:")
    print("1. Open camera_preview.html in your browser")
    print("2. Click 'Start Stream' for any camera")
    print("3. Verify live preview works without blocking recording")
    print("4. Test concurrent recording and streaming")


if __name__ == "__main__":
    main()

View File

@@ -1,146 +0,0 @@
# GigE Camera Image Capture
This project provides simple Python scripts to connect to a GigE camera and capture images using the provided SDK.
## Files Overview
### Demo Files (provided with camera)
- `python demo/mvsdk.py` - Main SDK wrapper library
- `python demo/grab.py` - Basic image capture example
- `python demo/cv_grab.py` - OpenCV-based continuous capture
- `python demo/cv_grab_callback.py` - Callback-based capture
- `python demo/readme.txt` - Original demo documentation
### Custom Scripts
- `camera_capture.py` - Standalone script to capture 10 images with 200ms intervals
- `test.ipynb` - Jupyter notebook with the same functionality
- `images/` - Directory where captured images are saved
## Features
- **Automatic camera detection** - Finds and connects to available GigE cameras
- **Configurable capture** - Currently set to capture 10 images with 200ms intervals
- **Both mono and color support** - Automatically detects camera type
- **Timestamped filenames** - Images saved with date/time stamps
- **Error handling** - Robust error handling for camera operations
- **Cross-platform** - Works on Windows and Linux (with appropriate image flipping)
## Requirements
- Python 3.x
- OpenCV (`cv2`)
- NumPy
- Matplotlib (for Jupyter notebook display)
- GigE camera SDK (MVSDK) - included in `python demo/` directory
## Usage
### Option 1: Standalone Script
Run the standalone Python script:
```bash
python camera_capture.py
```
This will:
1. Initialize the camera SDK
2. Detect available cameras
3. Connect to the first camera found
4. Configure camera settings (manual exposure, continuous mode)
5. Capture 10 images with 200ms intervals
6. Save images to the `images/` directory
7. Clean up and close the camera
### Option 2: Jupyter Notebook
Open and run the `test.ipynb` notebook:
```bash
jupyter notebook test.ipynb
```
The notebook provides the same functionality but with:
- Step-by-step execution
- Detailed explanations
- Visual display of the last captured image
- Better error reporting
## Camera Configuration
The scripts are configured with the following default settings:
- **Trigger Mode**: Continuous capture (mode 0)
- **Exposure**: Manual, 30ms
- **Output Format**:
- Monochrome cameras: MONO8
- Color cameras: BGR8
- **Image Processing**: Automatic ISP processing from RAW to RGB/MONO
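In SDK terms, those defaults map to calls along these lines (a sketch based on the `python demo/grab.py` example; exact constants and limits may differ for your camera):
```python
import mvsdk  # from the "python demo" directory

# Open the first detected camera
devices = mvsdk.CameraEnumerateDevice()
hCamera = mvsdk.CameraInit(devices[0], -1, -1)
cap = mvsdk.CameraGetCapability(hCamera)

# Output format: MONO8 for mono sensors, BGR8 for color sensors
if cap.sIspCapacity.bMonoSensor != 0:
    mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
else:
    mvsdk.CameraSetIspOutFormat(hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)

# Continuous capture (trigger mode 0) with manual 30 ms exposure
mvsdk.CameraSetTriggerMode(hCamera, 0)
mvsdk.CameraSetAeState(hCamera, 0)               # disable auto exposure
mvsdk.CameraSetExposureTime(hCamera, 30 * 1000)  # value is in microseconds
mvsdk.CameraPlay(hCamera)
```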
## Output
Images are saved in the `images/` directory with the following naming convention:
```
image_XX_YYYYMMDD_HHMMSS_mmm.jpg
```
Where:
- `XX` = Image number (01-10)
- `YYYYMMDD_HHMMSS_mmm` = Timestamp with milliseconds
Example: `image_01_20250722_140530_123.jpg`
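A filename in that convention can be generated with the standard library, for example:
```python
from datetime import datetime

def image_filename(index: int) -> str:
    """Return a name like image_01_20250722_140530_123.jpg (number + timestamp with ms)."""
    now = datetime.now()
    return f"image_{index:02d}_{now:%Y%m%d_%H%M%S}_{now.microsecond // 1000:03d}.jpg"

print(image_filename(1))
```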
## Troubleshooting
### Common Issues
1. **"No camera was found!"**
- Check camera connection (Ethernet cable)
- Verify camera power
- Check network settings (camera and PC should be on same subnet)
- Ensure camera drivers are installed
2. **"CameraInit Failed"**
- Camera might be in use by another application
- Check camera permissions
- Try restarting the camera or PC
3. **"Failed to capture image"**
- Check camera settings
- Verify sufficient lighting
- Check exposure settings
4. **Images appear upside down**
- This is handled automatically on Windows
- Linux users may need to adjust the flip settings
### Network Configuration
For GigE cameras, ensure:
- Camera and PC are on the same network segment
- PC network adapter supports Jumbo frames (recommended)
- Firewall allows camera communication
- Sufficient network bandwidth
## Customization
You can modify the scripts to:
- **Change capture count**: Modify the range in the capture loop
- **Adjust timing**: Change the `time.sleep(0.2)` value
- **Modify exposure**: Change the exposure time parameter
- **Change output format**: Modify file format and quality settings
- **Add image processing**: Insert processing steps before saving
## SDK Reference
The camera SDK (`mvsdk.py`) provides extensive functionality:
- Camera enumeration and initialization
- Image capture and processing
- Parameter configuration (exposure, gain, etc.)
- Trigger modes and timing
- Image format conversion
- Error handling
Refer to the original SDK documentation for advanced features.

View File

@@ -1,184 +0,0 @@
# USDA Vision Camera System - Implementation Summary
## 🎉 Project Completed Successfully!
The USDA Vision Camera System has been fully implemented and tested. All components are working correctly and the system is ready for deployment.
## ✅ What Was Built
### Core Architecture
- **Modular Design**: Clean separation of concerns across multiple modules
- **Multi-threading**: Concurrent MQTT listening, camera monitoring, and recording
- **Event-driven**: Thread-safe communication between components
- **Configuration-driven**: JSON-based configuration system
### Key Components
1. **MQTT Integration** (`usda_vision_system/mqtt/`)
- Listens to two machine topics: `vision/vibratory_conveyor/state` and `vision/blower_separator/state`
- Thread-safe message handling with automatic reconnection
- State normalization (on/off/error)
2. **Camera Management** (`usda_vision_system/camera/`)
- Automatic GigE camera discovery using python demo library
- Periodic status monitoring (every 2 seconds)
- Camera initialization and configuration management
- **Discovered Cameras**:
- Blower-Yield-Cam (192.168.1.165)
- Cracker-Cam (192.168.1.167)
3. **Video Recording** (`usda_vision_system/camera/recorder.py`)
- Automatic recording start/stop based on machine states
- Timestamp-based file naming: `camera1_recording_20250726_143022.avi`
- Configurable FPS, exposure, and gain settings
- Thread-safe recording with proper cleanup
4. **Storage Management** (`usda_vision_system/storage/`)
- Organized file storage under `./storage/camera1/` and `./storage/camera2/`
- File indexing and metadata tracking
- Automatic cleanup of old files
- Storage statistics and integrity checking
5. **REST API Server** (`usda_vision_system/api/`)
- FastAPI server on port 8000
- Real-time WebSocket updates
- Manual recording control endpoints
- System status and monitoring endpoints
6. **Comprehensive Logging** (`usda_vision_system/core/logging_config.py`)
- Colored console output
- Rotating log files
- Component-specific log levels
- Performance monitoring and error tracking
## 🚀 How to Use
### Quick Start
```bash
# Run system tests
python test_system.py
# Start the system
python main.py
# Or use the startup script
./start_system.sh
```
### Configuration
Edit `config.json` to customize:
- MQTT broker settings
- Camera configurations
- Storage paths
- System parameters
### API Access
- System status: `http://localhost:8000/system/status`
- Camera status: `http://localhost:8000/cameras`
- Manual recording: `POST http://localhost:8000/cameras/camera1/start-recording`
- Real-time updates: WebSocket at `ws://localhost:8000/ws`
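A quick way to watch the WebSocket feed from Python during development (a sketch assuming the third-party `websockets` package; the dashboard itself uses the browser WebSocket API as shown further below):
```python
import asyncio
import json
import websockets  # pip install websockets

async def watch_updates():
    async with websockets.connect("ws://localhost:8000/ws") as ws:
        async for message in ws:
            update = json.loads(message)
            print(update)  # real-time system updates (camera/machine/recording state)

asyncio.run(watch_updates())
```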
## 📊 Test Results
All system tests passed successfully:
- ✅ Module imports
- ✅ Configuration loading
- ✅ Camera discovery (found 2 cameras)
- ✅ Storage setup
- ✅ MQTT configuration
- ✅ System initialization
- ✅ API endpoints
## 🔧 System Behavior
### Automatic Recording Flow
1. **Machine turns ON** → MQTT message received → Recording starts automatically
2. **Machine turns OFF** → MQTT message received → Recording stops and saves file
3. **Files saved** with timestamp: `camera1_recording_YYYYMMDD_HHMMSS.avi`
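In code, that flow boils down to a handler along these lines (an illustrative sketch only; the class and method names here are assumptions, not the actual recorder API):
```python
from datetime import datetime

def normalize_state(payload: str) -> str:
    """Map a raw MQTT payload onto the normalized on/off/error states."""
    value = payload.strip().lower()
    if value in ("on", "running", "1", "true"):
        return "on"
    if value in ("off", "stopped", "0", "false"):
        return "off"
    return "error"

def on_machine_state_change(camera, payload: str) -> None:
    """Start or stop recording for the camera mapped to this machine topic."""
    state = normalize_state(payload)
    if state == "on" and not camera.is_recording:
        filename = f"{camera.name}_recording_{datetime.now():%Y%m%d_%H%M%S}.avi"
        camera.start_recording(filename)
    elif state == "off" and camera.is_recording:
        camera.stop_recording()
```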
### Manual Control
- Start/stop recording via API calls
- Monitor system status in real-time
- Check camera availability on demand
### Dashboard Integration
The system is designed to integrate with your React + Vite + Tailwind + Supabase dashboard:
- REST API for status queries
- WebSocket for real-time updates
- JSON responses for easy frontend consumption
## 📁 Project Structure
```
usda_vision_system/
├── core/ # Configuration, state management, events, logging
├── mqtt/ # MQTT client and message handlers
├── camera/ # Camera management, monitoring, recording
├── storage/ # File organization and management
├── api/ # FastAPI server and WebSocket support
└── main.py # Application coordinator
Supporting Files:
├── main.py # Entry point script
├── config.json # System configuration
├── test_system.py # Test suite
├── start_system.sh # Startup script
└── README_SYSTEM.md # Comprehensive documentation
```
## 🎯 Key Features Delivered
- ✅ **Dual MQTT topic listening** for two machines
- ✅ **Automatic camera recording** triggered by machine states
- ✅ **GigE camera support** using python demo library
- ✅ **Thread-safe multi-tasking** (MQTT + camera monitoring + recording)
- ✅ **Timestamp-based file naming** in organized directories
- ✅ **2-second camera status monitoring** with on-demand checks
- ✅ **REST API and WebSocket** for dashboard integration
- ✅ **Comprehensive logging** with error tracking
- ✅ **Configuration management** via JSON
- ✅ **Storage management** with cleanup capabilities
- ✅ **Graceful startup/shutdown** with signal handling
## 🔮 Ready for Dashboard Integration
The system provides everything needed for your React dashboard:
```javascript
// Example API usage
const systemStatus = await fetch('http://localhost:8000/system/status');
const cameras = await fetch('http://localhost:8000/cameras');
// WebSocket for real-time updates
const ws = new WebSocket('ws://localhost:8000/ws');
ws.onmessage = (event) => {
  const update = JSON.parse(event.data);
  // Handle real-time system updates
};

// Manual recording control
await fetch('http://localhost:8000/cameras/camera1/start-recording', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({ camera_name: 'camera1' })
});
```
## 🎊 Next Steps
The system is production-ready! You can now:
1. **Deploy** the system on your target hardware
2. **Integrate** with your existing React dashboard
3. **Configure** MQTT topics and camera settings as needed
4. **Monitor** system performance through logs and API endpoints
5. **Extend** functionality as requirements evolve
The modular architecture makes it easy to add new features, cameras, or MQTT topics in the future.
---
**System Status**: ✅ **FULLY OPERATIONAL**
**Test Results**: ✅ **ALL TESTS PASSING**
**Cameras Detected**: ✅ **2 GIGE CAMERAS READY**
**Ready for Production**: ✅ **YES**

View File

@@ -1 +0,0 @@
# USDA-Vision-Cameras

View File

@@ -1,249 +0,0 @@
# USDA Vision Camera System
A comprehensive system for monitoring machines via MQTT and automatically recording video from GigE cameras when machines are active.
## Overview
This system integrates MQTT machine monitoring with automated video recording from GigE cameras. When a machine turns on (detected via MQTT), the system automatically starts recording from the associated camera. When the machine turns off, recording stops and the video is saved with a timestamp.
## Features
- **MQTT Integration**: Listens to multiple machine state topics
- **Automatic Recording**: Starts/stops recording based on machine states
- **GigE Camera Support**: Uses the python demo library (mvsdk) for camera control
- **Multi-threading**: Concurrent MQTT listening, camera monitoring, and recording
- **REST API**: FastAPI server for dashboard integration
- **WebSocket Support**: Real-time status updates
- **Storage Management**: Organized file storage with cleanup capabilities
- **Comprehensive Logging**: Detailed logging with rotation and error tracking
- **Configuration Management**: JSON-based configuration system
## Architecture
```
┌─────────────────┐    ┌─────────────────┐    ┌─────────────────┐
│   MQTT Broker   │    │   GigE Camera   │    │    Dashboard    │
│                 │    │                 │    │     (React)     │
└─────────┬───────┘    └─────────┬───────┘    └─────────┬───────┘
          │                      │                      │
          │ Machine States       │ Video Streams        │ API Calls
          │                      │                      │
┌─────────▼──────────────────────▼──────────────────────▼───────┐
│                   USDA Vision Camera System                   │
├───────────────────────────────────────────────────────────────┤
│  ┌─────────────┐   ┌─────────────┐   ┌─────────────┐          │
│  │ MQTT Client │   │   Camera    │   │ API Server  │          │
│  │             │   │   Manager   │   │             │          │
│  └─────────────┘   └─────────────┘   └─────────────┘          │
│  ┌─────────────┐   ┌─────────────┐   ┌─────────────┐          │
│  │    State    │   │   Storage   │   │    Event    │          │
│  │   Manager   │   │   Manager   │   │   System    │          │
│  └─────────────┘   └─────────────┘   └─────────────┘          │
└───────────────────────────────────────────────────────────────┘
```
## Installation
1. **Prerequisites**:
- Python 3.11+
- GigE cameras with python demo library
- MQTT broker (e.g., Mosquitto)
- uv package manager (recommended)
2. **Install Dependencies**:
```bash
uv sync
```
3. **Setup Storage Directory**:
```bash
sudo mkdir -p /storage
sudo chown $USER:$USER /storage
```
## Configuration
Edit `config.json` to configure your system:
```json
{
  "mqtt": {
    "broker_host": "192.168.1.110",
    "broker_port": 1883,
    "topics": {
      "vibratory_conveyor": "vision/vibratory_conveyor/state",
      "blower_separator": "vision/blower_separator/state"
    }
  },
  "cameras": [
    {
      "name": "camera1",
      "machine_topic": "vibratory_conveyor",
      "storage_path": "/storage/camera1",
      "exposure_ms": 1.0,
      "gain": 3.5,
      "target_fps": 3.0,
      "enabled": true
    }
  ]
}
```
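With those broker settings and topics, a minimal standalone subscriber for sanity-checking the MQTT side might look like this (a sketch assuming the paho-mqtt package with its 1.x-style callback API; the system's own client lives in `usda_vision_system/mqtt/`):
```python
import paho.mqtt.client as mqtt

TOPICS = [
    "vision/vibratory_conveyor/state",
    "vision/blower_separator/state",
]

def on_message(client, userdata, msg):
    # Payloads are normalized to on/off/error by the system's handlers
    print(f"{msg.topic}: {msg.payload.decode(errors='replace')}")

client = mqtt.Client()
client.on_message = on_message
client.connect("192.168.1.110", 1883)
for topic in TOPICS:
    client.subscribe(topic)
client.loop_forever()
```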
## Usage
### Basic Usage
1. **Start the System**:
```bash
python main.py
```
2. **With Custom Config**:
```bash
python main.py --config my_config.json
```
3. **Debug Mode**:
```bash
python main.py --log-level DEBUG
```
### API Endpoints
The system provides a REST API on port 8000:
- `GET /system/status` - Overall system status
- `GET /cameras` - All camera statuses
- `GET /machines` - All machine states
- `POST /cameras/{name}/start-recording` - Manual recording start
- `POST /cameras/{name}/stop-recording` - Manual recording stop
- `GET /storage/stats` - Storage statistics
- `WebSocket /ws` - Real-time updates
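For example, a few of these endpoints can be exercised from Python like so (a sketch assuming the `requests` package and the default port):
```python
import requests

BASE_URL = "http://localhost:8000"

# Overall system health and per-camera status
status = requests.get(f"{BASE_URL}/system/status", timeout=5).json()
cameras = requests.get(f"{BASE_URL}/cameras", timeout=5).json()
print(status.get("status"), list(cameras.keys()))

# Manually start, then stop, a recording on camera1
requests.post(f"{BASE_URL}/cameras/camera1/start-recording",
              json={"camera_name": "camera1"}, timeout=10)
requests.post(f"{BASE_URL}/cameras/camera1/stop-recording", timeout=10)
```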
### Dashboard Integration
The system is designed to integrate with your existing React + Vite + Tailwind + Supabase dashboard:
1. **API Integration**: Use the REST endpoints to display system status
2. **WebSocket**: Connect to `/ws` for real-time updates
3. **Supabase Storage**: Store recording metadata and system logs
## File Organization
```
/storage/
├── camera1/
│   ├── camera1_recording_20250726_143022.avi
│   └── camera1_recording_20250726_143155.avi
├── camera2/
│   ├── camera2_recording_20250726_143025.avi
│   └── camera2_recording_20250726_143158.avi
└── file_index.json
```
## Monitoring and Logging
### Log Files
- `usda_vision_system.log` - Main system log (rotated)
- Console output with colored formatting
- Component-specific log levels
### Performance Monitoring
The system includes built-in performance monitoring:
- Startup times
- Recording session metrics
- MQTT message processing rates
- Camera status check intervals
### Error Tracking
Comprehensive error tracking with:
- Error counts per component
- Detailed error context
- Automatic recovery attempts
## Troubleshooting
### Common Issues
1. **Camera Not Found**:
- Check camera connections
- Verify python demo library installation
- Run camera discovery: Check logs for enumeration results
2. **MQTT Connection Failed**:
- Verify broker IP and port
- Check network connectivity
- Verify credentials if authentication is enabled
3. **Recording Fails**:
- Check storage permissions
- Verify available disk space
- Check camera initialization logs
4. **API Server Won't Start**:
- Check if port 8000 is available
- Verify FastAPI dependencies
- Check firewall settings
### Debug Commands
```bash
# Check system status
curl http://localhost:8000/system/status
# Check camera status
curl http://localhost:8000/cameras
# Manual recording start
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"camera_name": "camera1"}'
```
## Development
### Project Structure
```
usda_vision_system/
├── core/ # Core functionality
├── mqtt/ # MQTT client and handlers
├── camera/ # Camera management and recording
├── storage/ # File management
├── api/ # FastAPI server
└── main.py # Application coordinator
```
### Adding New Features
1. **New Camera Type**: Extend `camera/recorder.py`
2. **New MQTT Topics**: Update `config.json` and `mqtt/handlers.py`
3. **New API Endpoints**: Add to `api/server.py`
4. **New Events**: Define in `core/events.py`
### Testing
```bash
# Run basic system test
python -c "from usda_vision_system import USDAVisionSystem; s = USDAVisionSystem(); print('OK')"
# Test MQTT connection
python -c "from usda_vision_system.mqtt.client import MQTTClient; # ... test code"
# Test camera discovery
python -c "import sys; sys.path.append('python demo'); import mvsdk; print(len(mvsdk.CameraEnumerateDevice()))"
```
## License
This project is developed for USDA research purposes.
## Support
For issues and questions:
1. Check the logs in `usda_vision_system.log`
2. Review the troubleshooting section
3. Check API status at `http://localhost:8000/health`

View File

@@ -1,190 +0,0 @@
# Time Synchronization Setup - Atlanta, Georgia
## ✅ Time Synchronization Complete!
The USDA Vision Camera System has been configured for proper time synchronization with Atlanta, Georgia (Eastern Time Zone).
## 🕐 What Was Implemented
### System-Level Time Configuration
- **Timezone**: Set to `America/New_York` (Eastern Time)
- **Current Status**: Eastern Daylight Time (EDT, UTC-4)
- **NTP Sync**: Configured with multiple reliable time servers
- **Hardware Clock**: Synchronized with system time
### Application-Level Timezone Support
- **Timezone-Aware Timestamps**: All recordings use Atlanta time
- **Automatic DST Handling**: Switches between EST/EDT automatically
- **Time Sync Monitoring**: Built-in time synchronization checking
- **Consistent Formatting**: Standardized timestamp formats throughout
## 🔧 Key Features
### 1. Automatic Time Synchronization
```bash
# NTP servers configured:
- time.nist.gov (NIST atomic clock)
- pool.ntp.org (NTP pool)
- time.google.com (Google time)
- time.cloudflare.com (Cloudflare time)
```
### 2. Timezone-Aware Recording Filenames
```
Example: camera1_recording_20250725_213241.avi
Format: {camera}_{type}_{YYYYMMDD_HHMMSS}.avi
Time: Atlanta local time (EDT/EST)
```
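Generating a filename in this format with a timezone-aware timestamp only needs the standard library (a sketch using `zoneinfo`; the helper actually used in the codebase may differ):
```python
from datetime import datetime
from zoneinfo import ZoneInfo

ATLANTA_TZ = ZoneInfo("America/New_York")

def recording_filename(camera_name: str, recording_type: str = "recording") -> str:
    """Build a {camera}_{type}_{YYYYMMDD_HHMMSS}.avi name in Atlanta local time."""
    now = datetime.now(ATLANTA_TZ)
    return f"{camera_name}_{recording_type}_{now:%Y%m%d_%H%M%S}.avi"

print(recording_filename("camera1"))  # e.g. camera1_recording_20250725_213241.avi
```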
### 3. Time Verification Tools
- **Startup Check**: Automatic time sync verification on system start
- **Manual Check**: `python check_time.py` for on-demand verification
- **API Integration**: Time sync status available via REST API
### 4. Comprehensive Logging
```
=== TIME SYNCHRONIZATION STATUS ===
System time: 2025-07-25 21:32:41 EDT
Timezone: EDT (-0400)
Daylight Saving: Yes
Sync status: synchronized
Time difference: 0.10 seconds
=====================================
```
## 🚀 Usage
### Automatic Operation
The system automatically:
- Uses Atlanta time for all timestamps
- Handles daylight saving time transitions
- Monitors time synchronization status
- Logs time-related events
### Manual Verification
```bash
# Check time synchronization
python check_time.py
# Test timezone functions
python test_timezone.py
# View system time status
timedatectl status
```
### API Endpoints
```bash
# System status includes time info
curl http://localhost:8000/system/status
# Example response includes:
{
  "system_started": true,
  "uptime_seconds": 3600,
  "timestamp": "2025-07-25T21:32:41-04:00"
}
```
## 📊 Current Status
### Time Synchronization
- ✅ **System Timezone**: America/New_York (EDT)
- ✅ **NTP Sync**: Active and synchronized
- ✅ **Time Accuracy**: Within 0.1 seconds of atomic time
- ✅ **DST Support**: Automatic EST/EDT switching
### Application Integration
- ✅ **Recording Timestamps**: Atlanta time zone
- ✅ **Log Timestamps**: Timezone-aware logging
- ✅ **API Responses**: ISO format with timezone
- ✅ **File Naming**: Consistent Atlanta time format
### Monitoring
- ✅ **Startup Verification**: Time sync checked on boot
- ✅ **Continuous Monitoring**: Built-in sync status tracking
- ✅ **Error Detection**: Alerts for time drift issues
- ✅ **Manual Tools**: On-demand verification scripts
## 🔍 Technical Details
### Timezone Configuration
```json
{
  "system": {
    "timezone": "America/New_York"
  }
}
```
### Time Sources
1. **Primary**: NIST atomic clock (time.nist.gov)
2. **Secondary**: NTP pool servers (pool.ntp.org)
3. **Backup**: Google/Cloudflare time servers
4. **Fallback**: Local system clock
### File Naming Convention
```
Pattern: {camera_name}_recording_{YYYYMMDD_HHMMSS}.avi
Example: camera1_recording_20250725_213241.avi
Timezone: Always Atlanta local time (EST/EDT)
```
## 🎯 Benefits
### For Operations
- **Consistent Timestamps**: All recordings use Atlanta time
- **Easy Correlation**: Timestamps match local business hours
- **Automatic DST**: No manual timezone adjustments needed
- **Reliable Sync**: Multiple time sources ensure accuracy
### For Analysis
- **Local Time Context**: Recordings timestamped in business timezone
- **Accurate Sequencing**: Precise timing for event correlation
- **Standard Format**: Consistent naming across all recordings
- **Audit Trail**: Complete time synchronization logging
### For Integration
- **Dashboard Ready**: Timezone-aware API responses
- **Database Compatible**: ISO format timestamps with timezone
- **Log Analysis**: Structured time information in logs
- **Monitoring**: Built-in time sync health checks
## 🔧 Maintenance
### Regular Checks
The system automatically:
- Verifies time sync on startup
- Logs time synchronization status
- Monitors for time drift
- Alerts on sync failures
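A drift check in the spirit of `check_time.py` can be as simple as comparing against one of the configured NTP servers (a sketch assuming the third-party `ntplib` package):
```python
import ntplib

MAX_DRIFT_SECONDS = 1.0

def check_time_sync(server: str = "time.nist.gov") -> bool:
    """Return True if the local clock is within MAX_DRIFT_SECONDS of the NTP server."""
    response = ntplib.NTPClient().request(server, version=3, timeout=5)
    drift = abs(response.offset)
    print(f"Time difference: {drift:.2f} seconds")
    return drift <= MAX_DRIFT_SECONDS

if __name__ == "__main__":
    print("synchronized" if check_time_sync() else "out of sync")
```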
### Manual Maintenance
```bash
# Force time sync
sudo systemctl restart systemd-timesyncd
# Check NTP status
timedatectl show-timesync --all
# Verify timezone
timedatectl status
```
## 📈 Next Steps
The time synchronization is now fully operational. The system will:
1. **Automatically maintain** accurate Atlanta time
2. **Generate timestamped recordings** with local time
3. **Monitor sync status** and alert on issues
4. **Provide timezone-aware** API responses for dashboard integration
All recording files will now have accurate Atlanta timestamps, making it easy to correlate with local business operations and machine schedules.
---
**Time Sync Status**: ✅ **SYNCHRONIZED**
**Timezone**: ✅ **America/New_York (EDT)**
**Accuracy**: ✅ **±0.1 seconds**
**Ready for Production**: ✅ **YES**

View File

@@ -1,191 +0,0 @@
# Camera Video Recorder
A Python script for recording videos from GigE cameras using the provided SDK with custom exposure and gain settings.
## Features
- **List all available cameras** - Automatically detects and displays all connected cameras
- **Custom camera settings** - Set exposure time to 1ms and gain to 3.5x (or custom values)
- **Video recording** - Record videos in AVI format with timestamp filenames
- **Live preview** - Test camera functionality with live preview mode
- **Interactive menu** - User-friendly menu system for all operations
- **Automatic cleanup** - Proper resource management and cleanup
## Requirements
- Python 3.x
- OpenCV (`cv2`)
- NumPy
- Camera SDK (mvsdk) - included in `python demo` directory
- GigE camera connected to the system
## Installation
1. Ensure your GigE camera is connected and properly configured
2. Make sure the `python demo` directory with `mvsdk.py` is present
3. Install required Python packages:
```bash
pip install opencv-python numpy
```
## Usage
### Basic Usage
Run the script:
```bash
python camera_video_recorder.py
```
The script will:
1. Display a welcome message and feature overview
2. List all available cameras
3. Let you select a camera (if multiple are available)
4. Allow you to set custom exposure and gain values
5. Present an interactive menu with options
### Menu Options
1. **Start Recording** - Begin video recording with timestamp filename
2. **List Camera Info** - Display detailed camera information
3. **Test Camera (Live Preview)** - View live camera feed without recording
4. **Exit** - Clean up and exit the program
### Default Settings
- **Exposure Time**: 1.0ms (1000 microseconds)
- **Gain**: 3.5x
- **Video Format**: AVI with XVID codec
- **Frame Rate**: 30 FPS
- **Output Directory**: `videos/` (created automatically)
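On the output side, those defaults correspond roughly to the following OpenCV setup (a sketch; the frame size comes from the camera at runtime):
```python
import os
from datetime import datetime
import cv2

os.makedirs("videos", exist_ok=True)
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
output_path = f"videos/camera_recording_{timestamp}.avi"

fourcc = cv2.VideoWriter_fourcc(*"XVID")
fps = 30.0
frame_size = (1920, 1080)  # replace with the camera's actual resolution

writer = cv2.VideoWriter(output_path, fourcc, fps, frame_size)
# writer.write(frame) for each captured frame, then:
writer.release()
```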
### Recording Controls
- **Start Recording**: Select option 1 from the menu
- **Stop Recording**: Press 'q' in the preview window
- **Video Files**: Saved as `videos/camera_recording_YYYYMMDD_HHMMSS.avi`
## File Structure
```
camera_video_recorder.py     # Main script
python demo/
    mvsdk.py                 # Camera SDK wrapper
    (other demo files)
videos/                      # Output directory (created automatically)
    camera_recording_*.avi   # Recorded video files
```
## Script Features
### CameraVideoRecorder Class
- `list_cameras()` - Enumerate and display available cameras
- `initialize_camera()` - Set up camera with custom exposure and gain
- `start_recording()` - Initialize video writer and begin recording
- `stop_recording()` - Stop recording and save video file
- `record_loop()` - Main recording loop with live preview
- `cleanup()` - Proper resource cleanup
### Key Functions
- **Camera Detection**: Automatically finds all connected GigE cameras
- **Settings Validation**: Checks and clamps exposure/gain values to camera limits
- **Frame Processing**: Handles both monochrome and color cameras
- **Windows Compatibility**: Handles frame flipping for Windows systems
- **Error Handling**: Comprehensive error handling and user feedback
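The settings-validation step is essentially a clamp against the camera's reported limits, for example:
```python
def clamp(value: float, minimum: float, maximum: float) -> float:
    """Keep a requested setting inside the camera's supported range."""
    return max(minimum, min(value, maximum))

gain = clamp(3.5, 1.0, 16.0)                     # gain range as reported in the sample output
exposure_us = clamp(1.0 * 1000, 100, 1_000_000)  # illustrative exposure limits, in microseconds
```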
## Example Output
```
Camera Video Recorder
====================
This script allows you to:
- List all available cameras
- Record videos with custom exposure (1ms) and gain (3.5x) settings
- Save videos with timestamps
- Stop recording anytime with 'q' key
Found 1 camera(s):
0: GigE Camera Model (GigE) - SN: 12345678
Using camera: GigE Camera Model
Camera Settings:
Enter exposure time in ms (default 1.0): 1.0
Enter gain value (default 3.5): 3.5
Initializing camera with:
- Exposure: 1.0ms
- Gain: 3.5x
Camera type: Color
Set exposure time: 1000.0μs
Set analog gain: 3.50x (range: 1.00 - 16.00)
Camera started successfully
==================================================
Camera Video Recorder Menu
==================================================
1. Start Recording
2. List Camera Info
3. Test Camera (Live Preview)
4. Exit
Select option (1-4): 1
Started recording to: videos/camera_recording_20241223_143022.avi
Frame size: (1920, 1080), FPS: 30.0
Press 'q' to stop recording...
Recording... Press 'q' in the preview window to stop
Recording stopped!
Saved: videos/camera_recording_20241223_143022.avi
Frames recorded: 450
Duration: 15.2 seconds
Average FPS: 29.6
```
## Troubleshooting
### Common Issues
1. **"No cameras found!"**
- Check camera connection
- Verify camera power
- Ensure network configuration for GigE cameras
2. **"SDK initialization failed"**
- Verify `python demo/mvsdk.py` exists
- Check camera drivers are installed
3. **"Camera initialization failed"**
- Camera may be in use by another application
- Try disconnecting and reconnecting the camera
4. **Recording issues**
- Ensure sufficient disk space
- Check write permissions in the output directory
### Performance Tips
- Close other applications using the camera
- Ensure adequate system resources (CPU, RAM)
- Use SSD storage for better write performance
- Adjust frame rate if experiencing dropped frames
## Customization
You can modify the script to:
- Change video codec (currently XVID)
- Adjust target frame rate
- Modify output filename format
- Add additional camera settings
- Change preview window size
## Notes
- Videos are saved in the `videos/` directory with timestamp filenames
- The script handles both monochrome and color cameras automatically
- Frame flipping is handled automatically for Windows systems
- All resources are properly cleaned up on exit

View File

@@ -0,0 +1,227 @@
#!/usr/bin/env python3
"""
Simple test script for auto-recording functionality.
This script performs basic checks to verify that the auto-recording feature
is properly integrated and configured.
"""
import sys
import os
import json
import time

# Add the current directory to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))


def test_config_structure():
    """Test that config.json has the required auto-recording fields"""
    print("🔍 Testing configuration structure...")

    try:
        with open("config.json", "r") as f:
            config = json.load(f)

        # Check system-level auto-recording setting
        system_config = config.get("system", {})
        if "auto_recording_enabled" not in system_config:
            print("❌ Missing 'auto_recording_enabled' in system config")
            return False
        print(f"✅ System auto-recording enabled: {system_config['auto_recording_enabled']}")

        # Check camera-level auto-recording settings
        cameras = config.get("cameras", [])
        if not cameras:
            print("❌ No cameras found in config")
            return False

        for camera in cameras:
            camera_name = camera.get("name", "unknown")
            required_fields = [
                "auto_start_recording_enabled",
                "auto_recording_max_retries",
                "auto_recording_retry_delay_seconds",
            ]
            missing_fields = [field for field in required_fields if field not in camera]
            if missing_fields:
                print(f"❌ Camera {camera_name} missing fields: {missing_fields}")
                return False

            print(f"✅ Camera {camera_name} auto-recording config:")
            print(f"   - Enabled: {camera['auto_start_recording_enabled']}")
            print(f"   - Max retries: {camera['auto_recording_max_retries']}")
            print(f"   - Retry delay: {camera['auto_recording_retry_delay_seconds']}s")
            print(f"   - Machine topic: {camera.get('machine_topic', 'unknown')}")

        return True
    except Exception as e:
        print(f"❌ Error reading config: {e}")
        return False


def test_module_imports():
    """Test that all required modules can be imported"""
    print("\n🔍 Testing module imports...")

    try:
        from usda_vision_system.recording.auto_manager import AutoRecordingManager
        print("✅ AutoRecordingManager imported successfully")

        from usda_vision_system.core.config import Config
        config = Config("config.json")
        print("✅ Config loaded successfully")

        from usda_vision_system.core.state_manager import StateManager
        state_manager = StateManager()
        print("✅ StateManager created successfully")

        from usda_vision_system.core.events import EventSystem
        event_system = EventSystem()
        print("✅ EventSystem created successfully")

        # Test creating AutoRecordingManager (without camera_manager for now)
        auto_manager = AutoRecordingManager(config, state_manager, event_system, None)
        print("✅ AutoRecordingManager created successfully")

        return True
    except Exception as e:
        print(f"❌ Import error: {e}")
        return False


def test_camera_mapping():
    """Test camera to machine topic mapping"""
    print("\n🔍 Testing camera to machine mapping...")

    try:
        with open("config.json", "r") as f:
            config = json.load(f)

        cameras = config.get("cameras", [])
        expected_mappings = {
            "camera1": "vibratory_conveyor",  # Conveyor/cracker cam
            "camera2": "blower_separator",    # Blower separator
        }

        for camera in cameras:
            camera_name = camera.get("name")
            machine_topic = camera.get("machine_topic")

            if camera_name in expected_mappings:
                expected_topic = expected_mappings[camera_name]
                if machine_topic == expected_topic:
                    print(f"✅ {camera_name} correctly mapped to {machine_topic}")
                else:
                    print(f"❌ {camera_name} mapped to {machine_topic}, expected {expected_topic}")
                    return False
            else:
                print(f"⚠️ Unknown camera: {camera_name}")

        return True
    except Exception as e:
        print(f"❌ Error checking mappings: {e}")
        return False


def test_api_models():
    """Test that API models include auto-recording fields"""
    print("\n🔍 Testing API models...")

    try:
        from usda_vision_system.api.models import (
            CameraStatusResponse,
            CameraConfigResponse,
            AutoRecordingConfigRequest,
            AutoRecordingConfigResponse,
            AutoRecordingStatusResponse,
        )

        # Check CameraStatusResponse has auto-recording fields
        camera_response = CameraStatusResponse(
            name="test",
            status="available",
            is_recording=False,
            last_checked="2024-01-01T00:00:00",
            auto_recording_enabled=True,
            auto_recording_active=False,
            auto_recording_failure_count=0,
        )
        print("✅ CameraStatusResponse includes auto-recording fields")

        # Check CameraConfigResponse has auto-recording fields
        config_response = CameraConfigResponse(
            name="test",
            machine_topic="test_topic",
            storage_path="/test",
            enabled=True,
            auto_start_recording_enabled=True,
            auto_recording_max_retries=3,
            auto_recording_retry_delay_seconds=5,
            exposure_ms=1.0,
            gain=1.0,
            target_fps=30.0,
            sharpness=100,
            contrast=100,
            saturation=100,
            gamma=100,
            noise_filter_enabled=False,
            denoise_3d_enabled=False,
            auto_white_balance=True,
            color_temperature_preset=0,
            anti_flicker_enabled=False,
            light_frequency=1,
            bit_depth=8,
            hdr_enabled=False,
            hdr_gain_mode=0,
        )
        print("✅ CameraConfigResponse includes auto-recording fields")

        print("✅ All auto-recording API models available")
        return True
    except Exception as e:
        print(f"❌ API model error: {e}")
        return False


def main():
    """Run all basic tests"""
    print("🧪 Auto-Recording Integration Test")
    print("=" * 40)

    tests = [
        test_config_structure,
        test_module_imports,
        test_camera_mapping,
        test_api_models,
    ]

    passed = 0
    total = len(tests)

    for test in tests:
        try:
            if test():
                passed += 1
        except Exception as e:
            print(f"❌ Test {test.__name__} failed with exception: {e}")

    print("\n" + "=" * 40)
    print(f"📊 Results: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 All integration tests passed!")
        print("\n📝 Next steps:")
        print("1. Start the system: python main.py")
        print("2. Run full tests: python tests/test_auto_recording.py")
        print("3. Test with MQTT messages to trigger auto-recording")
        return True
    else:
        print(f"⚠️ {total - passed} test(s) failed")
        print("Please fix the issues before running the full system")
        return False


if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)