diff --git a/AI_INTEGRATION_GUIDE.md b/AI_INTEGRATION_GUIDE.md
new file mode 100644
index 0000000..9d881ee
--- /dev/null
+++ b/AI_INTEGRATION_GUIDE.md
@@ -0,0 +1,595 @@
+# 🤖 AI Integration Guide: USDA Vision Camera Streaming for React Projects
+
+This guide is specifically designed for AI assistants to understand and implement the USDA Vision Camera streaming functionality in React applications.
+
+## System Overview
+
+The USDA Vision Camera system provides live video streaming through REST API endpoints. The streaming uses MJPEG format, which is natively supported by the HTML `<img>` tag and can be easily integrated into React components.
+
+### Key Characteristics:
+- **Base URL**: `http://vision:8000` (production) or `http://localhost:8000` (development)
+- **Stream Format**: MJPEG (Motion JPEG)
+- **Content-Type**: `multipart/x-mixed-replace; boundary=frame`
+- **Authentication**: None (add if needed for production)
+- **CORS**: Enabled for all origins (configure for production)
+
+### Base URL Configuration:
+- **Production**: `http://vision:8000` (requires hostname setup)
+- **Development**: `http://localhost:8000` (local testing)
+- **Custom IP**: `http://192.168.1.100:8000` (replace with actual IP)
+- **Custom hostname**: Configure DNS or /etc/hosts as needed
+
+## API Endpoints Reference
+
+### 1. Get Camera List
+```http
+GET /cameras
+```
+**Response:**
+```json
+{
+ "camera1": {
+ "name": "camera1",
+ "status": "connected",
+ "is_recording": false,
+ "last_checked": "2025-01-28T10:30:00",
+ "device_info": {...}
+ },
+ "camera2": {...}
+}
+```
+
+### 2. Start Camera Stream
+```http
+POST /cameras/{camera_name}/start-stream
+```
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Started streaming for camera camera1"
+}
+```
+
+### 3. Stop Camera Stream
+```http
+POST /cameras/{camera_name}/stop-stream
+```
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Stopped streaming for camera camera1"
+}
+```
+
+### 4. Live Video Stream
+```http
+GET /cameras/{camera_name}/stream
+```
+**Response:** MJPEG video stream
+**Usage:** Set as the `src` attribute of an HTML `<img>` element
+
+## ⚛️ React Integration Examples
+
+### Basic Camera Stream Component
+
+```jsx
+import React, { useState, useEffect } from 'react';
+
+const CameraStream = ({ cameraName, apiBaseUrl = 'http://vision:8000' }) => {
+ const [isStreaming, setIsStreaming] = useState(false);
+ const [error, setError] = useState(null);
+ const [loading, setLoading] = useState(false);
+
+ const startStream = async () => {
+ setLoading(true);
+ setError(null);
+
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/start-stream`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+
+ if (response.ok) {
+ setIsStreaming(true);
+ } else {
+ const errorData = await response.json();
+ setError(errorData.detail || 'Failed to start stream');
+ }
+ } catch (err) {
+ setError(`Network error: ${err.message}`);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ const stopStream = async () => {
+ setLoading(true);
+
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/stop-stream`, {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ },
+ });
+
+ if (response.ok) {
+ setIsStreaming(false);
+ } else {
+ const errorData = await response.json();
+ setError(errorData.detail || 'Failed to stop stream');
+ }
+ } catch (err) {
+ setError(`Network error: ${err.message}`);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+  return (
+    <div className="camera-stream">
+      <h3>Camera: {cameraName}</h3>
+
+      {/* Video Stream */}
+      <div className="stream-container">
+        {isStreaming ? (
+          <img
+            src={`${apiBaseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`}
+            alt={`Live stream from ${cameraName}`}
+            style={{ width: '100%', maxWidth: '640px' }}
+            onError={() => setError('Stream connection lost')}
+          />
+        ) : (
+          <div className="no-stream">No Stream Active</div>
+        )}
+      </div>
+
+      {/* Controls */}
+      <div className="stream-controls">
+        <button onClick={startStream} disabled={loading || isStreaming}>
+          {loading ? 'Working...' : 'Start Stream'}
+        </button>
+        <button onClick={stopStream} disabled={loading || !isStreaming}>
+          {loading ? 'Working...' : 'Stop Stream'}
+        </button>
+      </div>
+
+      {/* Error Display */}
+      {error && (
+        <div className="error-message">Error: {error}</div>
+      )}
+    </div>
+  );
+};
+
+export default CameraStream;
+```
+
+### Multi-Camera Dashboard Component
+
+```jsx
+import React, { useState, useEffect } from 'react';
+import CameraStream from './CameraStream';
+
+const CameraDashboard = ({ apiBaseUrl = 'http://vision:8000' }) => {
+ const [cameras, setCameras] = useState({});
+ const [loading, setLoading] = useState(true);
+ const [error, setError] = useState(null);
+
+ useEffect(() => {
+ fetchCameras();
+
+ // Refresh camera status every 30 seconds
+ const interval = setInterval(fetchCameras, 30000);
+ return () => clearInterval(interval);
+ }, []);
+
+ const fetchCameras = async () => {
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras`);
+ if (response.ok) {
+ const data = await response.json();
+ setCameras(data);
+ setError(null);
+ } else {
+ setError('Failed to fetch cameras');
+ }
+ } catch (err) {
+ setError(`Network error: ${err.message}`);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+  if (loading) {
+    return <div>Loading cameras...</div>;
+  }
+
+  if (error) {
+    return (
+      <div className="dashboard-error">
+        Error: {error}
+        <button onClick={fetchCameras}>Retry</button>
+      </div>
+    );
+  }
+
+  return (
+    <div className="camera-dashboard">
+      <h2>USDA Vision Camera Dashboard</h2>
+
+      <div className="camera-grid">
+        {Object.entries(cameras).map(([cameraName, cameraInfo]) => (
+          <div key={cameraName} className="camera-card">
+            <CameraStream cameraName={cameraName} apiBaseUrl={apiBaseUrl} />
+
+            {/* Camera Status */}
+            <div className="camera-status">
+              <p>Status: {cameraInfo.status}</p>
+              <p>Recording: {cameraInfo.is_recording ? 'Yes' : 'No'}</p>
+              <p>Last Checked: {new Date(cameraInfo.last_checked).toLocaleString()}</p>
+            </div>
+          </div>
+        ))}
+      </div>
+    </div>
+  );
+};
+
+export default CameraDashboard;
+```
+
+### Custom Hook for Camera Management
+
+```jsx
+import { useState, useEffect, useCallback } from 'react';
+
+const useCameraStream = (cameraName, apiBaseUrl = 'http://vision:8000') => {
+ const [isStreaming, setIsStreaming] = useState(false);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState(null);
+
+ const startStream = useCallback(async () => {
+ setLoading(true);
+ setError(null);
+
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/start-stream`, {
+ method: 'POST',
+ });
+
+ if (response.ok) {
+ setIsStreaming(true);
+ return { success: true };
+ } else {
+ const errorData = await response.json();
+ const errorMsg = errorData.detail || 'Failed to start stream';
+ setError(errorMsg);
+ return { success: false, error: errorMsg };
+ }
+ } catch (err) {
+ const errorMsg = `Network error: ${err.message}`;
+ setError(errorMsg);
+ return { success: false, error: errorMsg };
+ } finally {
+ setLoading(false);
+ }
+ }, [cameraName, apiBaseUrl]);
+
+ const stopStream = useCallback(async () => {
+ setLoading(true);
+
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/stop-stream`, {
+ method: 'POST',
+ });
+
+ if (response.ok) {
+ setIsStreaming(false);
+ return { success: true };
+ } else {
+ const errorData = await response.json();
+ const errorMsg = errorData.detail || 'Failed to stop stream';
+ setError(errorMsg);
+ return { success: false, error: errorMsg };
+ }
+ } catch (err) {
+ const errorMsg = `Network error: ${err.message}`;
+ setError(errorMsg);
+ return { success: false, error: errorMsg };
+ } finally {
+ setLoading(false);
+ }
+ }, [cameraName, apiBaseUrl]);
+
+ const getStreamUrl = useCallback(() => {
+ return `${apiBaseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`;
+ }, [cameraName, apiBaseUrl]);
+
+ return {
+ isStreaming,
+ loading,
+ error,
+ startStream,
+ stopStream,
+ getStreamUrl,
+ };
+};
+
+export default useCameraStream;
+```
+
+## 🎨 Styling with Tailwind CSS
+
+```jsx
+const CameraStreamTailwind = ({ cameraName }) => {
+ const { isStreaming, loading, error, startStream, stopStream, getStreamUrl } = useCameraStream(cameraName);
+
+  return (
+    <div className="bg-white rounded-lg shadow-md p-6">
+      <h3 className="text-xl font-semibold mb-4">Camera: {cameraName}</h3>
+
+      {/* Stream Container */}
+      <div className="bg-gray-100 rounded-lg overflow-hidden mb-4">
+        {isStreaming ? (
+          <img
+            src={getStreamUrl()}
+            alt={`Live stream from ${cameraName}`}
+            className="w-full h-auto"
+            onError={() => console.error('Stream connection lost')}
+          />
+        ) : (
+          <div className="flex items-center justify-center h-64 text-gray-500">
+            No Stream Active
+          </div>
+        )}
+      </div>
+
+      {/* Controls */}
+      <div className="flex gap-2 mb-4">
+        <button
+          onClick={startStream}
+          disabled={loading || isStreaming}
+          className="px-4 py-2 bg-green-600 text-white rounded disabled:opacity-50"
+        >
+          Start Stream
+        </button>
+        <button
+          onClick={stopStream}
+          disabled={loading || !isStreaming}
+          className="px-4 py-2 bg-red-600 text-white rounded disabled:opacity-50"
+        >
+          Stop Stream
+        </button>
+      </div>
+
+      {/* Error Display */}
+      {error && (
+        <div className="rounded bg-red-100 px-4 py-2 text-red-700">Error: {error}</div>
+      )}
+    </div>
+  );
+};
+```
+
+## 🔧 Configuration Options
+
+### Environment Variables (.env)
+```env
+# Production configuration (using 'vision' hostname)
+REACT_APP_CAMERA_API_URL=http://vision:8000
+REACT_APP_STREAM_REFRESH_INTERVAL=30000
+REACT_APP_STREAM_TIMEOUT=10000
+
+# Development configuration (using localhost)
+# REACT_APP_CAMERA_API_URL=http://localhost:8000
+
+# Custom IP configuration
+# REACT_APP_CAMERA_API_URL=http://192.168.1.100:8000
+```
+
+### API Configuration
+```javascript
+const apiConfig = {
+ baseUrl: process.env.REACT_APP_CAMERA_API_URL || 'http://vision:8000',
+ timeout: parseInt(process.env.REACT_APP_STREAM_TIMEOUT) || 10000,
+ refreshInterval: parseInt(process.env.REACT_APP_STREAM_REFRESH_INTERVAL) || 30000,
+};
+```
+
+### Hostname Setup Guide
+```bash
+# Option 1: Add to /etc/hosts (Linux/Mac)
+echo "127.0.0.1 vision" | sudo tee -a /etc/hosts
+
+# Option 2: Add to hosts file (Windows)
+# Add to C:\Windows\System32\drivers\etc\hosts:
+# 127.0.0.1 vision
+
+# Option 3: Configure DNS
+# Point 'vision' hostname to your server's IP address
+
+# Verify hostname resolution
+ping vision
+```
+
+## 🚨 Important Implementation Notes
+
+### 1. MJPEG Stream Handling
+- Use an HTML `<img>` tag with its `src` pointing to the stream endpoint
+- Add timestamp query parameter to prevent caching: `?t=${Date.now()}`
+- Handle `onError` event for connection issues
+
+### 2. Error Handling
+- Network errors (fetch failures)
+- HTTP errors (4xx, 5xx responses)
+- Stream connection errors (img onError)
+- Timeout handling for long requests
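+
+For the timeout case, note that `fetch` has no built-in timeout. Below is a minimal sketch using `AbortController`; the `fetchWithTimeout` helper name is illustrative, not part of the camera API.
+
+```javascript
+// Hypothetical helper: abort any camera API call that hangs too long.
+const fetchWithTimeout = async (url, options = {}, timeoutMs = 10000) => {
+  const controller = new AbortController();
+  const timer = setTimeout(() => controller.abort(), timeoutMs);
+  try {
+    return await fetch(url, { ...options, signal: controller.signal });
+  } finally {
+    clearTimeout(timer);
+  }
+};
+
+// Example: give up on a slow start-stream call after 10 seconds.
+// const response = await fetchWithTimeout(
+//   `${apiBaseUrl}/cameras/camera1/start-stream`,
+//   { method: 'POST' }
+// );
+```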
+
+### 3. Performance Considerations
+- Streams consume bandwidth continuously
+- Stop streams when components unmount (see the cleanup sketch below)
+- Limit concurrent streams based on system capacity
+- Consider lazy loading for multiple cameras
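+
+One way to guarantee the unmount cleanup mentioned above is a `useEffect` cleanup that stops the server-side stream automatically. This sketch builds on the `useCameraStream` hook shown earlier; the `./useCameraStream` import path is an assumption.
+
+```jsx
+import React, { useEffect } from 'react';
+import useCameraStream from './useCameraStream'; // hypothetical path to the hook above
+
+const AutoStoppingStream = ({ cameraName }) => {
+  const { isStreaming, startStream, stopStream, getStreamUrl } = useCameraStream(cameraName);
+
+  useEffect(() => {
+    startStream();
+    // Cleanup runs on unmount: release the streaming connection on the server.
+    return () => {
+      stopStream();
+    };
+  }, [startStream, stopStream]);
+
+  return isStreaming ? <img src={getStreamUrl()} alt={`${cameraName} stream`} /> : null;
+};
+
+export default AutoStoppingStream;
+```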
+
+### 4. State Management
+- Track streaming state per camera
+- Handle loading states during API calls
+- Manage error states with user feedback
+- Refresh camera list periodically
+
+## 📱 Mobile Considerations
+
+```jsx
+// Responsive design for mobile
+const mobileStyles = {
+ container: {
+ padding: '10px',
+ maxWidth: '100vw',
+ },
+ stream: {
+ width: '100%',
+ maxWidth: '100vw',
+ height: 'auto',
+ },
+ controls: {
+ display: 'flex',
+ flexDirection: 'column',
+ gap: '8px',
+ },
+};
+```
+
+## 🧪 Testing Integration
+
+```javascript
+// Test API connectivity
+const testConnection = async () => {
+ try {
+ const response = await fetch(`${apiBaseUrl}/health`);
+ return response.ok;
+ } catch {
+ return false;
+ }
+};
+
+// Test camera availability
+const testCamera = async (cameraName) => {
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/test-connection`, {
+ method: 'POST',
+ });
+ return response.ok;
+ } catch {
+ return false;
+ }
+};
+```
+
+## Additional Files for AI Integration
+
+### TypeScript Definitions
+- `camera-api.types.ts` - Complete TypeScript definitions for all API types
+- `streaming-api.http` - REST Client file with all streaming endpoints
+- `STREAMING_GUIDE.md` - Comprehensive user guide for streaming functionality
+
+### Quick Integration Checklist for AI Assistants
+
+1. **Copy TypeScript types** from `camera-api.types.ts`
+2. **Use API endpoints** from `streaming-api.http`
+3. **Implement error handling** as shown in examples
+4. **Add CORS configuration** if needed for production
+5. **Test with multiple cameras** using provided examples
+
+### Key Integration Points
+
+- **Stream URL Format**: `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`
+- **Start Stream**: `POST /cameras/{name}/start-stream`
+- **Stop Stream**: `POST /cameras/{name}/stop-stream`
+- **Camera List**: `GET /cameras`
+- **Error Handling**: Always wrap in try-catch blocks
+- **Loading States**: Implement for better UX
+
+### Production Considerations
+
+- Configure CORS for specific origins
+- Add authentication if required
+- Implement rate limiting
+- Monitor system resources with multiple streams
+- Add reconnection logic for network issues
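+
+A minimal reconnection sketch for the last point, assuming the MJPEG stream is rendered in an `<img>` element (the retry limit, delays, and helper name are illustrative):
+
+```javascript
+// Reload the stream image with exponential backoff when the connection drops.
+const attachStreamWithRetry = (imgElement, baseUrl, cameraName, maxRetries = 5) => {
+  let attempt = 0;
+
+  const load = () => {
+    imgElement.src = `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`;
+  };
+
+  imgElement.onerror = () => {
+    if (attempt >= maxRetries) return;
+    const delay = 1000 * 2 ** attempt; // 1s, 2s, 4s, ...
+    attempt += 1;
+    setTimeout(load, delay);
+  };
+
+  load();
+};
+```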
+
+This documentation provides everything an AI assistant needs to integrate the USDA Vision Camera streaming functionality into React applications, including complete code examples, error handling, and best practices.
diff --git a/CAMERA_CONFIG_API.md b/CAMERA_CONFIG_API.md
new file mode 100644
index 0000000..cefd91c
--- /dev/null
+++ b/CAMERA_CONFIG_API.md
@@ -0,0 +1,423 @@
+# 🎛️ Camera Configuration API Guide
+
+This guide explains how to configure camera settings via API endpoints, including all the advanced settings from your config.json.
+
+## Configuration Categories
+
+### ✅ **Real-time Configurable (No Restart Required)**
+These settings can be changed while the camera is active:
+
+- **Basic**: `exposure_ms`, `gain`, `target_fps`
+- **Image Quality**: `sharpness`, `contrast`, `saturation`, `gamma`
+- **Color**: `auto_white_balance`, `color_temperature_preset`
+- **Advanced**: `anti_flicker_enabled`, `light_frequency`
+- **HDR**: `hdr_enabled`, `hdr_gain_mode`
+
+### ⚠️ **Restart Required**
+These settings require camera restart to take effect:
+
+- **Noise Reduction**: `noise_filter_enabled`, `denoise_3d_enabled`
+- **System**: `machine_topic`, `storage_path`, `enabled`, `bit_depth`
+
+## API Endpoints
+
+### 1. Get Camera Configuration
+```http
+GET /cameras/{camera_name}/config
+```
+
+**Response:**
+```json
+{
+ "name": "camera1",
+ "machine_topic": "vibratory_conveyor",
+ "storage_path": "/storage/camera1",
+ "enabled": true,
+ "exposure_ms": 1.0,
+ "gain": 3.5,
+ "target_fps": 0,
+ "sharpness": 120,
+ "contrast": 110,
+ "saturation": 100,
+ "gamma": 100,
+ "noise_filter_enabled": true,
+ "denoise_3d_enabled": false,
+ "auto_white_balance": true,
+ "color_temperature_preset": 0,
+ "anti_flicker_enabled": true,
+ "light_frequency": 1,
+ "bit_depth": 8,
+ "hdr_enabled": false,
+ "hdr_gain_mode": 0
+}
+```
+
+### 2. Update Camera Configuration
+```http
+PUT /cameras/{camera_name}/config
+Content-Type: application/json
+```
+
+**Request Body (all fields optional):**
+```json
+{
+ "exposure_ms": 2.0,
+ "gain": 4.0,
+ "target_fps": 10.0,
+ "sharpness": 150,
+ "contrast": 120,
+ "saturation": 110,
+ "gamma": 90,
+ "noise_filter_enabled": true,
+ "denoise_3d_enabled": false,
+ "auto_white_balance": false,
+ "color_temperature_preset": 1,
+ "anti_flicker_enabled": true,
+ "light_frequency": 1,
+ "hdr_enabled": false,
+ "hdr_gain_mode": 0
+}
+```
+
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Camera camera1 configuration updated",
+ "updated_settings": ["exposure_ms", "gain", "sharpness"]
+}
+```
+
+### 3. Apply Configuration (Restart Camera)
+```http
+POST /cameras/{camera_name}/apply-config
+```
+
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Configuration applied to camera camera1"
+}
+```
+
+## Setting Ranges and Descriptions
+
+### Basic Settings
+| Setting | Range | Default | Description |
+|---------|-------|---------|-------------|
+| `exposure_ms` | 0.1 - 1000.0 | 1.0 | Exposure time in milliseconds |
+| `gain` | 0.0 - 20.0 | 3.5 | Camera gain multiplier |
+| `target_fps` | 0.0 - 120.0 | 0 | Target FPS (0 = maximum) |
+
+### Image Quality Settings
+| Setting | Range | Default | Description |
+|---------|-------|---------|-------------|
+| `sharpness` | 0 - 200 | 100 | Image sharpness (100 = no sharpening) |
+| `contrast` | 0 - 200 | 100 | Image contrast (100 = normal) |
+| `saturation` | 0 - 200 | 100 | Color saturation (color cameras only) |
+| `gamma` | 0 - 300 | 100 | Gamma correction (100 = normal) |
+
+### Color Settings
+| Setting | Values | Default | Description |
+|---------|--------|---------|-------------|
+| `auto_white_balance` | true/false | true | Automatic white balance |
+| `color_temperature_preset` | 0-10 | 0 | Color temperature preset (0=auto) |
+
+### Advanced Settings
+| Setting | Values | Default | Description |
+|---------|--------|---------|-------------|
+| `anti_flicker_enabled` | true/false | true | Reduce artificial lighting flicker |
+| `light_frequency` | 0/1 | 1 | Light frequency (0=50Hz, 1=60Hz) |
+| `noise_filter_enabled` | true/false | true | Basic noise filtering |
+| `denoise_3d_enabled` | true/false | false | Advanced 3D denoising |
+
+### HDR Settings
+| Setting | Values | Default | Description |
+|---------|--------|---------|-------------|
+| `hdr_enabled` | true/false | false | High Dynamic Range |
+| `hdr_gain_mode` | 0-3 | 0 | HDR processing mode |
+
+## Usage Examples
+
+### Example 1: Adjust Exposure and Gain
+```bash
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+ -H "Content-Type: application/json" \
+ -d '{
+ "exposure_ms": 1.5,
+ "gain": 4.0
+ }'
+```
+
+### Example 2: Improve Image Quality
+```bash
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+ -H "Content-Type: application/json" \
+ -d '{
+ "sharpness": 150,
+ "contrast": 120,
+ "gamma": 90
+ }'
+```
+
+### Example 3: Configure for Indoor Lighting
+```bash
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+ -H "Content-Type: application/json" \
+ -d '{
+ "anti_flicker_enabled": true,
+ "light_frequency": 1,
+ "auto_white_balance": false,
+ "color_temperature_preset": 2
+ }'
+```
+
+### Example 4: Enable HDR Mode
+```bash
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+ -H "Content-Type: application/json" \
+ -d '{
+ "hdr_enabled": true,
+ "hdr_gain_mode": 1
+ }'
+```
+
+## ⚛️ React Integration Examples
+
+### Camera Configuration Component
+```jsx
+import React, { useState, useEffect } from 'react';
+
+const CameraConfig = ({ cameraName, apiBaseUrl = 'http://localhost:8000' }) => {
+ const [config, setConfig] = useState(null);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState(null);
+
+ // Load current configuration
+ useEffect(() => {
+ fetchConfig();
+ }, [cameraName]);
+
+ const fetchConfig = async () => {
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`);
+ if (response.ok) {
+ const data = await response.json();
+ setConfig(data);
+ } else {
+ setError('Failed to load configuration');
+ }
+ } catch (err) {
+ setError(`Error: ${err.message}`);
+ }
+ };
+
+ const updateConfig = async (updates) => {
+ setLoading(true);
+ try {
+ const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`, {
+ method: 'PUT',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify(updates)
+ });
+
+ if (response.ok) {
+ const result = await response.json();
+ console.log('Updated settings:', result.updated_settings);
+ await fetchConfig(); // Reload configuration
+ } else {
+ const error = await response.json();
+ setError(error.detail || 'Update failed');
+ }
+ } catch (err) {
+ setError(`Error: ${err.message}`);
+ } finally {
+ setLoading(false);
+ }
+ };
+
+ const handleSliderChange = (setting, value) => {
+ updateConfig({ [setting]: value });
+ };
+
+  if (!config) return <div>Loading configuration...</div>;
+
+  return (
+    <div className="camera-config">
+      <h3>Camera Configuration: {cameraName}</h3>
+
+      {/* Basic Settings */}
+      <label>
+        Exposure (ms): {config.exposure_ms}
+        <input
+          type="range" min="0.1" max="1000" step="0.1"
+          value={config.exposure_ms}
+          onChange={(e) => handleSliderChange('exposure_ms', parseFloat(e.target.value))}
+        />
+      </label>
+
+      {/* Image Quality Settings */}
+      <label>
+        Sharpness: {config.sharpness}
+        <input
+          type="range" min="0" max="200" step="1"
+          value={config.sharpness}
+          onChange={(e) => handleSliderChange('sharpness', parseInt(e.target.value, 10))}
+        />
+      </label>
+
+      {/* Advanced Settings */}
+      <label>
+        <input
+          type="checkbox"
+          checked={config.hdr_enabled}
+          onChange={(e) => updateConfig({ hdr_enabled: e.target.checked })}
+        />
+        HDR Enabled
+      </label>
+
+      {error && (
+        <div className="error">{error}</div>
+      )}
+
+      {loading && <div>Updating configuration...</div>}
+    </div>
+  );
+};
+
+export default CameraConfig;
+```
+
+## Configuration Workflow
+
+### 1. Real-time Adjustments
+For settings that don't require restart:
+```bash
+# Update settings
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+  -H "Content-Type: application/json" \
+  -d '{"exposure_ms": 2.0}'
+
+# Settings take effect immediately
+# Continue recording/streaming without interruption
+```
+
+### 2. Settings Requiring Restart
+For noise reduction and system settings:
+```bash
+# Update settings
+curl -X PUT http://localhost:8000/cameras/camera1/config \
+  -H "Content-Type: application/json" \
+  -d '{"noise_filter_enabled": false}'
+
+# Apply configuration (restarts camera)
+curl -X POST http://localhost:8000/cameras/camera1/apply-config
+
+# Camera reinitializes with new settings
+```
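+
+The same two-step workflow can be scripted from JavaScript. This is a minimal sketch; the `API_BASE` value and error handling are placeholders to adapt:
+
+```javascript
+const API_BASE = 'http://localhost:8000'; // adjust to your deployment
+
+// Update a restart-required setting, then apply it by restarting the camera.
+const applyNoiseFilterSetting = async (cameraName, enabled) => {
+  const update = await fetch(`${API_BASE}/cameras/${cameraName}/config`, {
+    method: 'PUT',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify({ noise_filter_enabled: enabled }),
+  });
+  if (!update.ok) throw new Error(`Config update failed: ${update.status}`);
+
+  // Restart the camera so the new setting takes effect.
+  const apply = await fetch(`${API_BASE}/cameras/${cameraName}/apply-config`, {
+    method: 'POST',
+  });
+  if (!apply.ok) throw new Error(`Apply config failed: ${apply.status}`);
+
+  return apply.json(); // e.g. { "success": true, "message": "Configuration applied to camera camera1" }
+};
+```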
+
+## 🚨 Important Notes
+
+### Camera State During Updates
+- **Real-time settings**: Applied immediately, no interruption
+- **Restart-required settings**: Saved to config, applied on next restart
+- **Recording**: Continues during real-time updates
+- **Streaming**: Continues during real-time updates
+
+### Error Handling
+- Invalid ranges return HTTP 422 with validation errors (see the sketch below)
+- Camera not found returns HTTP 404
+- SDK errors are logged and return HTTP 500
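+
+A small sketch (referenced above) of turning those error responses into readable messages; the only assumption is FastAPI's standard 422 body shape, where `detail` is a list of validation issues:
+
+```javascript
+const updateCameraConfig = async (apiBaseUrl, cameraName, updates) => {
+  const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`, {
+    method: 'PUT',
+    headers: { 'Content-Type': 'application/json' },
+    body: JSON.stringify(updates),
+  });
+
+  if (response.ok) return response.json();
+
+  const body = await response.json().catch(() => ({}));
+  if (response.status === 422 && Array.isArray(body.detail)) {
+    // Validation errors: report which fields were rejected.
+    const fields = body.detail.map((d) => (d.loc ? d.loc.join('.') : 'unknown field'));
+    throw new Error(`Invalid values for: ${fields.join(', ')}`);
+  }
+  if (response.status === 404) throw new Error(`Camera not found: ${cameraName}`);
+  throw new Error(body.detail || `HTTP ${response.status}`);
+};
+```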
+
+### Performance Impact
+- **Image quality settings**: Minimal performance impact
+- **Noise reduction**: May reduce FPS when enabled
+- **HDR**: Significant processing overhead when enabled
+
+This comprehensive API allows you to control all camera settings programmatically, making it perfect for integration with React dashboards or automated optimization systems!
diff --git a/STREAMING_GUIDE.md b/STREAMING_GUIDE.md
new file mode 100644
index 0000000..ca55700
--- /dev/null
+++ b/STREAMING_GUIDE.md
@@ -0,0 +1,240 @@
+# 🎥 USDA Vision Camera Live Streaming Guide
+
+This guide explains how to use the new live preview streaming functionality that allows you to view camera feeds in real-time without blocking recording operations.
+
+## Key Features
+
+- **Non-blocking streaming**: Live preview doesn't interfere with recording
+- **Separate camera connections**: Streaming uses independent camera instances
+- **MJPEG streaming**: Standard web-compatible video streaming
+- **Multiple concurrent viewers**: Multiple browsers can view the same stream
+- **REST API control**: Start/stop streaming via API endpoints
+- **Web interface**: Ready-to-use HTML interface for live preview
+
+## 🏗️ Architecture
+
+The streaming system creates separate camera connections for preview that are independent from recording:
+
+```
+Camera Hardware
+├── Recording Connection (CameraRecorder)
+│   ├── Used for video file recording
+│   ├── Triggered by MQTT machine states
+│   └── High quality, full FPS
+└── Streaming Connection (CameraStreamer)
+    ├── Used for live preview
+    ├── Controlled via API endpoints
+    └── Optimized for web viewing (lower FPS, JPEG compression)
+```
+
+## Quick Start
+
+### 1. Start the System
+```bash
+python main.py
+```
+
+### 2. Open the Web Interface
+Open `camera_preview.html` in your browser and click "Start Stream" for any camera.
+
+### 3. API Usage
+```bash
+# Start streaming for camera1
+curl -X POST http://localhost:8000/cameras/camera1/start-stream
+
+# View live stream (open in browser)
+http://localhost:8000/cameras/camera1/stream
+
+# Stop streaming
+curl -X POST http://localhost:8000/cameras/camera1/stop-stream
+```
+
+## 📡 API Endpoints
+
+### Start Streaming
+```http
+POST /cameras/{camera_name}/start-stream
+```
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Started streaming for camera camera1"
+}
+```
+
+### Stop Streaming
+```http
+POST /cameras/{camera_name}/stop-stream
+```
+**Response:**
+```json
+{
+ "success": true,
+ "message": "Stopped streaming for camera camera1"
+}
+```
+
+### Live Stream (MJPEG)
+```http
+GET /cameras/{camera_name}/stream
+```
+**Response:** Multipart MJPEG stream
+**Content-Type:** `multipart/x-mixed-replace; boundary=frame`
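+
+A minimal sketch of consuming the stream from plain browser JavaScript (the element id and helper name are illustrative):
+
+```javascript
+const API_BASE = 'http://localhost:8000';
+
+// Start the stream on the server, then point an <img> element at it.
+const showLivePreview = async (cameraName, imgElementId) => {
+  const response = await fetch(`${API_BASE}/cameras/${cameraName}/start-stream`, {
+    method: 'POST',
+  });
+  if (!response.ok) throw new Error(`Could not start stream: ${response.status}`);
+
+  const img = document.getElementById(imgElementId);
+  // The timestamp query parameter avoids showing a cached first frame.
+  img.src = `${API_BASE}/cameras/${cameraName}/stream?t=${Date.now()}`;
+};
+```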
+
+## Web Interface Usage
+
+The included `camera_preview.html` provides a complete web interface:
+
+1. **Camera Grid**: Shows all configured cameras
+2. **Stream Controls**: Start/Stop/Refresh buttons for each camera
+3. **Live Preview**: Real-time video feed display
+4. **Status Information**: System and camera status
+5. **Responsive Design**: Works on desktop and mobile
+
+### Features:
+- ✅ Real-time camera status
+- ✅ One-click stream start/stop
+- ✅ Automatic stream refresh
+- ✅ System health monitoring
+- ✅ Error handling and status messages
+
+## 🔧 Technical Details
+
+### Camera Streamer Configuration
+- **Preview FPS**: 10 FPS (configurable)
+- **JPEG Quality**: 70% (configurable)
+- **Frame Buffer**: 5 frames (prevents memory buildup)
+- **Timeout**: 200ms per frame capture
+
+### Memory Management
+- Automatic frame buffer cleanup
+- Queue-based frame management
+- Proper camera resource cleanup on stop
+
+### Thread Safety
+- Thread-safe streaming operations
+- Independent from recording threads
+- Proper synchronization with locks
+
+## 🧪 Testing
+
+### Run the Test Script
+```bash
+python test_streaming.py
+```
+
+This will test:
+- ✅ API endpoint functionality
+- ✅ Stream start/stop operations
+- ✅ Concurrent recording and streaming
+- ✅ Error handling
+
+### Manual Testing
+1. Start the system: `python main.py`
+2. Open `camera_preview.html` in browser
+3. Start streaming for a camera
+4. Trigger recording via MQTT or manual API
+5. Verify both work simultaneously
+
+## Concurrent Operations
+
+The system supports these concurrent operations:
+
+| Operation | Recording | Streaming | Notes |
+|-----------|-----------|-----------|-------|
+| Recording Only | ✅ | ❌ | Normal operation |
+| Streaming Only | ❌ | ✅ | Preview without recording |
+| Both Concurrent | ✅ | ✅ | **Independent connections** |
+
+### Example: Concurrent Usage
+```bash
+# Start streaming
+curl -X POST http://localhost:8000/cameras/camera1/start-stream
+
+# Start recording (while streaming continues)
+curl -X POST http://localhost:8000/cameras/camera1/start-recording \
+ -H "Content-Type: application/json" \
+ -d '{"filename": "test_recording.avi"}'
+
+# Both operations run independently!
+```
+
+## 🛠️ Configuration
+
+### Stream Settings (in CameraStreamer)
+```python
+self.preview_fps = 10.0 # Lower FPS for preview
+self.preview_quality = 70 # JPEG quality (1-100)
+self._frame_queue.maxsize = 5 # Frame buffer size
+```
+
+### Camera Settings
+The streamer uses the same camera configuration as recording:
+- Exposure time from `camera_config.exposure_ms`
+- Gain from `camera_config.gain`
+- Optimized trigger mode for continuous streaming
+
+## 🚨 Important Notes
+
+### Camera Access Patterns
+- **Recording**: Blocks camera during active recording
+- **Streaming**: Uses separate connection, doesn't block
+- **Health Checks**: Brief, non-blocking camera tests
+- **Multiple Streams**: Multiple browsers can view same stream
+
+### Performance Considerations
+- Streaming uses additional CPU/memory resources
+- Lower preview FPS reduces system load
+- JPEG compression reduces bandwidth usage
+- Frame queue prevents memory buildup
+
+### Error Handling
+- Automatic camera resource cleanup
+- Graceful handling of camera disconnections
+- Stream auto-restart capabilities
+- Detailed error logging
+
+## Troubleshooting
+
+### Stream Not Starting
+1. Check camera availability: `GET /cameras`
+2. Verify camera not in error state
+3. Check system logs for camera initialization errors
+4. Try camera reconnection: `POST /cameras/{name}/reconnect`
+
+### Poor Stream Quality
+1. Adjust `preview_quality` setting (higher = better quality)
+2. Increase `preview_fps` for smoother video
+3. Check network bandwidth
+4. Verify camera exposure/gain settings
+
+### Browser Issues
+1. Try different browser (Chrome/Firefox recommended)
+2. Check browser console for JavaScript errors
+3. Verify CORS settings in API server
+4. Clear browser cache and refresh
+
+## Future Enhancements
+
+Potential improvements for the streaming system:
+
+- WebRTC support for lower latency
+- Mobile app integration
+- Real-time camera setting adjustments
+- Stream analytics and monitoring
+- Authentication and access control
+- Multi-camera synchronized viewing
+
+## Support
+
+For issues with streaming functionality:
+
+1. Check the system logs: `usda_vision_system.log`
+2. Run the test script: `python test_streaming.py`
+3. Verify API health: `http://localhost:8000/health`
+4. Check camera status: `http://localhost:8000/cameras`
+
+---
+
+**✅ Live streaming is now ready for production use!**
diff --git a/api-endpoints.http b/api-endpoints.http
index 0476502..85c00ca 100644
--- a/api-endpoints.http
+++ b/api-endpoints.http
@@ -1,14 +1,74 @@
###############################################################################
# USDA Vision Camera System - Complete API Endpoints Documentation
-# Base URL: http://localhost:8000
+#
+# CONFIGURATION:
+# - Default Base URL: http://localhost:8000 (local development)
+# - Production Base URL: http://vision:8000 (when using hostname 'vision')
+# - Custom hostname: Update @baseUrl variable below
+#
+# HOSTNAME SETUP:
+# To use 'vision' hostname instead of 'localhost':
+# 1. Add to /etc/hosts: 127.0.0.1 vision
+# 2. Or configure DNS to point 'vision' to the server IP
+# 3. Update camera_preview.html: API_BASE = 'http://vision:8000'
###############################################################################
+# Base URL Configuration - Change this to match your setup
+@baseUrl = http://vision:8000
+# Alternative configurations:
+# @baseUrl = http://localhost:8000 # Local development
+# @baseUrl = http://192.168.1.100:8000 # Specific IP address
+# @baseUrl = http://your-server:8000 # Custom hostname
+
+###############################################################################
+# CONFIGURATION GUIDE
+###############################################################################
+
+### HOSTNAME CONFIGURATION OPTIONS:
+
+# Option 1: Using 'vision' hostname (recommended for production)
+# - Requires hostname resolution setup
+# - Add to /etc/hosts: 127.0.0.1 vision
+# - Or configure DNS: vision -> server IP address
+# - Update camera_preview.html: API_BASE = 'http://vision:8000'
+# - Set @baseUrl = http://vision:8000
+
+# Option 2: Using localhost (development)
+# - Works immediately on local machine
+# - Set @baseUrl = http://localhost:8000
+# - Update camera_preview.html: API_BASE = 'http://localhost:8000'
+
+# Option 3: Using specific IP address
+# - Replace with actual server IP
+# - Set @baseUrl = http://192.168.1.100:8000
+# - Update camera_preview.html: API_BASE = 'http://192.168.1.100:8000'
+
+# Option 4: Custom hostname
+# - Configure DNS or /etc/hosts for custom name
+# - Set @baseUrl = http://your-custom-name:8000
+# - Update camera_preview.html: API_BASE = 'http://your-custom-name:8000'
+
+### NETWORK CONFIGURATION:
+# - Default port: 8000
+# - CORS enabled for all origins (configure for production)
+# - No authentication required (add if needed)
+
+### CLIENT CONFIGURATION FILES TO UPDATE:
+# 1. camera_preview.html - Update API_BASE constant
+# 2. React projects - Update apiConfig.baseUrl
+# 3. This file - Update @baseUrl variable
+# 4. Any custom scripts - Update base URL
+
+### TESTING CONNECTIVITY:
+# Test if the API is reachable:
+GET {{baseUrl}}/health
+
###############################################################################
# SYSTEM ENDPOINTS
###############################################################################
### Root endpoint - API information
-GET http://localhost:8000/
+GET {{baseUrl}}/
# Response: SuccessResponse
# {
# "success": true,
@@ -427,3 +487,14 @@ Content-Type: application/json
# - fps omitted: Uses camera config default
# 6. Filenames automatically get datetime prefix: YYYYMMDD_HHMMSS_filename.avi
# 7. Recovery endpoints should be used in order: test-connection → reconnect → restart-grab → full-reset → reinitialize
+
+
+
+### Start streaming for camera1
+POST {{baseUrl}}/cameras/camera1/start-stream
+
+### View live stream (open in browser or use as an img src)
+GET {{baseUrl}}/cameras/camera1/stream
+
+### Stop streaming for camera1
+POST {{baseUrl}}/cameras/camera1/stop-stream
\ No newline at end of file
diff --git a/camera-api.types.ts b/camera-api.types.ts
new file mode 100644
index 0000000..3610ac8
--- /dev/null
+++ b/camera-api.types.ts
@@ -0,0 +1,367 @@
+/**
+ * TypeScript definitions for USDA Vision Camera System API
+ *
+ * This file provides complete type definitions for AI assistants
+ * to integrate the camera streaming functionality into React/TypeScript projects.
+ */
+
+// =============================================================================
+// BASE CONFIGURATION
+// =============================================================================
+
+export interface ApiConfig {
+ baseUrl: string;
+ timeout?: number;
+ refreshInterval?: number;
+}
+
+export const defaultApiConfig: ApiConfig = {
+ baseUrl: 'http://vision:8000', // Production default, change to 'http://localhost:8000' for development
+ timeout: 10000,
+ refreshInterval: 30000,
+};
+
+// =============================================================================
+// CAMERA TYPES
+// =============================================================================
+
+export interface CameraDeviceInfo {
+ friendly_name?: string;
+ port_type?: string;
+ serial_number?: string;
+ device_index?: number;
+ error?: string;
+}
+
+export interface CameraInfo {
+ name: string;
+ status: 'connected' | 'disconnected' | 'error' | 'not_found' | 'available';
+ is_recording: boolean;
+ last_checked: string; // ISO date string
+ last_error?: string | null;
+ device_info?: CameraDeviceInfo;
+ current_recording_file?: string | null;
+ recording_start_time?: string | null; // ISO date string
+}
+
+export interface CameraListResponse {
+ [cameraName: string]: CameraInfo;
+}
+
+// =============================================================================
+// STREAMING TYPES
+// =============================================================================
+
+export interface StreamStartRequest {
+ // No body required - camera name is in URL path
+}
+
+export interface StreamStartResponse {
+ success: boolean;
+ message: string;
+}
+
+export interface StreamStopRequest {
+ // No body required - camera name is in URL path
+}
+
+export interface StreamStopResponse {
+ success: boolean;
+ message: string;
+}
+
+export interface StreamStatus {
+ isStreaming: boolean;
+ streamUrl?: string;
+ error?: string;
+}
+
+// =============================================================================
+// RECORDING TYPES
+// =============================================================================
+
+export interface StartRecordingRequest {
+ filename?: string;
+ exposure_ms?: number;
+ gain?: number;
+ fps?: number;
+}
+
+export interface StartRecordingResponse {
+ success: boolean;
+ message: string;
+ filename?: string;
+}
+
+export interface StopRecordingResponse {
+ success: boolean;
+ message: string;
+}
+
+// =============================================================================
+// SYSTEM TYPES
+// =============================================================================
+
+export interface SystemStatusResponse {
+ status: string;
+ uptime: string;
+ api_server_running: boolean;
+ camera_manager_running: boolean;
+ mqtt_client_connected: boolean;
+ total_cameras: number;
+ active_recordings: number;
+ active_streams?: number;
+}
+
+export interface HealthResponse {
+ status: 'healthy' | 'unhealthy';
+ timestamp: string;
+}
+
+// =============================================================================
+// ERROR TYPES
+// =============================================================================
+
+export interface ApiError {
+ detail: string;
+ status_code?: number;
+}
+
+export interface StreamError extends Error {
+ type: 'network' | 'api' | 'stream' | 'timeout';
+ cameraName: string;
+ originalError?: Error;
+}
+
+// =============================================================================
+// HOOK TYPES
+// =============================================================================
+
+export interface UseCameraStreamResult {
+ isStreaming: boolean;
+ loading: boolean;
+ error: string | null;
+ startStream: () => Promise<{ success: boolean; error?: string }>;
+ stopStream: () => Promise<{ success: boolean; error?: string }>;
+ getStreamUrl: () => string;
+ refreshStream: () => void;
+}
+
+export interface UseCameraListResult {
+ cameras: CameraListResponse;
+ loading: boolean;
+ error: string | null;
+  refreshCameras: () => Promise<void>;
+}
+
+export interface UseCameraRecordingResult {
+ isRecording: boolean;
+ loading: boolean;
+ error: string | null;
+ currentFile: string | null;
+ startRecording: (options?: StartRecordingRequest) => Promise<{ success: boolean; error?: string }>;
+ stopRecording: () => Promise<{ success: boolean; error?: string }>;
+}
+
+// =============================================================================
+// COMPONENT PROPS TYPES
+// =============================================================================
+
+export interface CameraStreamProps {
+ cameraName: string;
+ apiConfig?: ApiConfig;
+ autoStart?: boolean;
+ onStreamStart?: (cameraName: string) => void;
+ onStreamStop?: (cameraName: string) => void;
+ onError?: (error: StreamError) => void;
+ className?: string;
+ style?: React.CSSProperties;
+}
+
+export interface CameraDashboardProps {
+ apiConfig?: ApiConfig;
+ cameras?: string[]; // If provided, only show these cameras
+ showRecordingControls?: boolean;
+ showStreamingControls?: boolean;
+ refreshInterval?: number;
+ onCameraSelect?: (cameraName: string) => void;
+ className?: string;
+}
+
+export interface CameraControlsProps {
+ cameraName: string;
+ apiConfig?: ApiConfig;
+ showRecording?: boolean;
+ showStreaming?: boolean;
+ onAction?: (action: 'start-stream' | 'stop-stream' | 'start-recording' | 'stop-recording', cameraName: string) => void;
+}
+
+// =============================================================================
+// API CLIENT TYPES
+// =============================================================================
+
+export interface CameraApiClient {
+ // System endpoints
+  getHealth(): Promise<HealthResponse>;
+  getSystemStatus(): Promise<SystemStatusResponse>;
+
+  // Camera endpoints
+  getCameras(): Promise<CameraListResponse>;
+  getCameraStatus(cameraName: string): Promise<CameraInfo>;
+  testCameraConnection(cameraName: string): Promise<{ success: boolean; message: string }>;
+
+  // Streaming endpoints
+  startStream(cameraName: string): Promise<StreamStartResponse>;
+  stopStream(cameraName: string): Promise<StreamStopResponse>;
+  getStreamUrl(cameraName: string): string;
+
+  // Recording endpoints
+  startRecording(cameraName: string, options?: StartRecordingRequest): Promise<StartRecordingResponse>;
+  stopRecording(cameraName: string): Promise<StopRecordingResponse>;
+}
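+
+// =============================================================================
+// REFERENCE IMPLEMENTATION (SKETCH)
+// =============================================================================
+
+/**
+ * Minimal fetch-based sketch of the CameraApiClient interface. It is
+ * illustrative, not part of the server project; the class name matches the
+ * CameraApiClientImpl referenced in the usage examples at the end of this file.
+ */
+export class CameraApiClientImpl implements CameraApiClient {
+  constructor(private config: ApiConfig = defaultApiConfig) {}
+
+  private async request<T>(path: string, init?: RequestInit): Promise<T> {
+    const response = await fetch(`${this.config.baseUrl}${path}`, init);
+    if (!response.ok) {
+      // The API reports errors as { detail: string }
+      const body = (await response.json().catch(() => ({}))) as ApiError;
+      throw new Error(body.detail || `HTTP ${response.status}`);
+    }
+    return response.json() as Promise<T>;
+  }
+
+  getHealth() { return this.request<HealthResponse>('/health'); }
+  getSystemStatus() { return this.request<SystemStatusResponse>('/system/status'); }
+
+  getCameras() { return this.request<CameraListResponse>('/cameras'); }
+  async getCameraStatus(cameraName: string): Promise<CameraInfo> {
+    // GET /cameras/{name} returns an object keyed by camera name (see streaming-api.http)
+    const result = await this.request<CameraListResponse>(`/cameras/${cameraName}`);
+    return result[cameraName];
+  }
+  testCameraConnection(cameraName: string) {
+    return this.request<{ success: boolean; message: string }>(`/cameras/${cameraName}/test-connection`, { method: 'POST' });
+  }
+
+  startStream(cameraName: string) { return this.request<StreamStartResponse>(`/cameras/${cameraName}/start-stream`, { method: 'POST' }); }
+  stopStream(cameraName: string) { return this.request<StreamStopResponse>(`/cameras/${cameraName}/stop-stream`, { method: 'POST' }); }
+  getStreamUrl(cameraName: string) { return `${this.config.baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`; }
+
+  startRecording(cameraName: string, options: StartRecordingRequest = {}) {
+    return this.request<StartRecordingResponse>(`/cameras/${cameraName}/start-recording`, {
+      method: 'POST',
+      headers: { 'Content-Type': 'application/json' },
+      body: JSON.stringify(options),
+    });
+  }
+  stopRecording(cameraName: string) { return this.request<StopRecordingResponse>(`/cameras/${cameraName}/stop-recording`, { method: 'POST' }); }
+}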
+
+// =============================================================================
+// UTILITY TYPES
+// =============================================================================
+
+export type CameraAction = 'start-stream' | 'stop-stream' | 'start-recording' | 'stop-recording' | 'test-connection';
+
+export interface CameraActionResult {
+ success: boolean;
+ message: string;
+ error?: string;
+}
+
+export interface StreamingState {
+ [cameraName: string]: {
+ isStreaming: boolean;
+ isLoading: boolean;
+ error: string | null;
+ lastStarted?: Date;
+ };
+}
+
+export interface RecordingState {
+ [cameraName: string]: {
+ isRecording: boolean;
+ isLoading: boolean;
+ error: string | null;
+ currentFile: string | null;
+ startTime?: Date;
+ };
+}
+
+// =============================================================================
+// EVENT TYPES
+// =============================================================================
+
+export interface CameraEvent {
+ type: 'stream-started' | 'stream-stopped' | 'stream-error' | 'recording-started' | 'recording-stopped' | 'recording-error';
+ cameraName: string;
+ timestamp: Date;
+ data?: any;
+}
+
+export type CameraEventHandler = (event: CameraEvent) => void;
+
+// =============================================================================
+// CONFIGURATION TYPES
+// =============================================================================
+
+export interface StreamConfig {
+ fps: number;
+ quality: number; // 1-100
+ timeout: number;
+ retryAttempts: number;
+ retryDelay: number;
+}
+
+export interface CameraStreamConfig extends StreamConfig {
+ cameraName: string;
+ autoReconnect: boolean;
+ maxReconnectAttempts: number;
+}
+
+// =============================================================================
+// CONTEXT TYPES (for React Context)
+// =============================================================================
+
+export interface CameraContextValue {
+ cameras: CameraListResponse;
+ streamingState: StreamingState;
+ recordingState: RecordingState;
+ apiClient: CameraApiClient;
+
+ // Actions
+  startStream: (cameraName: string) => Promise<void>;
+  stopStream: (cameraName: string) => Promise<void>;
+  startRecording: (cameraName: string, options?: StartRecordingRequest) => Promise<void>;
+  stopRecording: (cameraName: string) => Promise<void>;
+  refreshCameras: () => Promise<void>;
+
+ // State
+ loading: boolean;
+ error: string | null;
+}
+
+// =============================================================================
+// EXAMPLE USAGE TYPES
+// =============================================================================
+
+/**
+ * Example usage in React component:
+ *
+ * ```typescript
+ * import { CameraStreamProps, UseCameraStreamResult } from './camera-api.types';
+ *
+ * const CameraStream: React.FC<CameraStreamProps> = ({
+ * cameraName,
+ * apiConfig = defaultApiConfig,
+ * autoStart = false,
+ * onStreamStart,
+ * onStreamStop,
+ * onError
+ * }) => {
+ * const {
+ * isStreaming,
+ * loading,
+ * error,
+ * startStream,
+ * stopStream,
+ * getStreamUrl
+ * }: UseCameraStreamResult = useCameraStream(cameraName, apiConfig);
+ *
+ * // Component implementation...
+ * };
+ * ```
+ */
+
+/**
+ * Example API client usage:
+ *
+ * ```typescript
+ * const apiClient: CameraApiClient = new CameraApiClientImpl(defaultApiConfig);
+ *
+ * // Start streaming
+ * const result = await apiClient.startStream('camera1');
+ * if (result.success) {
+ * const streamUrl = apiClient.getStreamUrl('camera1');
+ * // Use streamUrl in img tag
+ * }
+ * ```
+ */
+
+/**
+ * Example hook usage:
+ *
+ * ```typescript
+ * const MyComponent = () => {
+ * const { cameras, loading, error, refreshCameras } = useCameraList();
+ * const { isStreaming, startStream, stopStream } = useCameraStream('camera1');
+ *
+ * // Component logic...
+ * };
+ * ```
+ */
+
+export default {};
diff --git a/camera_preview.html b/camera_preview.html
new file mode 100644
index 0000000..99d321e
--- /dev/null
+++ b/camera_preview.html
@@ -0,0 +1,336 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <title>USDA Vision Camera Live Preview</title>
+</head>
+<body>
+    <h1>🎥 USDA Vision Camera Live Preview</h1>
+
+    <!-- Camera cards (one per configured camera) are rendered here by the script below -->
+    <div id="camera-grid"></div>
+
+    <div id="system-info">
+        <h3>📡 System Information</h3>
+        <p id="system-status">Loading system status...</p>
+    </div>
+
+    <div id="api-endpoints">
+        <h3>API Endpoints</h3>
+        <ul>
+            <li>Live Stream: GET /cameras/{camera_name}/stream</li>
+            <li>Start Stream: POST /cameras/{camera_name}/start-stream</li>
+            <li>Stop Stream: POST /cameras/{camera_name}/stop-stream</li>
+            <li>Camera Status: GET /cameras</li>
+        </ul>
+    </div>
+
+    <script>
+        // Base URL of the USDA Vision Camera API (see api-endpoints.http for alternatives)
+        const API_BASE = 'http://localhost:8000';
+
+        // The page script fetches the camera list and system status from API_BASE,
+        // builds the camera grid with Start/Stop/Refresh controls, and points an
+        // <img> element per camera at `${API_BASE}/cameras/{name}/stream`.
+    </script>
+</body>
+</html>
diff --git a/streaming-api.http b/streaming-api.http
new file mode 100644
index 0000000..8e06df9
--- /dev/null
+++ b/streaming-api.http
@@ -0,0 +1,524 @@
+### USDA Vision Camera Streaming API
+###
+### CONFIGURATION:
+### - Production: http://vision:8000 (requires hostname setup)
+### - Development: http://localhost:8000
+### - Custom: Update @baseUrl below to match your setup
+###
+### This file contains streaming-specific API endpoints for live camera preview
+### Use with VS Code REST Client extension or similar tools.
+
+# Base URL - Update to match your configuration
+@baseUrl = http://vision:8000
+# Alternative: @baseUrl = http://localhost:8000
+
+### =============================================================================
+### STREAMING ENDPOINTS (NEW FUNCTIONALITY)
+### =============================================================================
+
+### Start camera streaming for live preview
+### This creates a separate camera connection that doesn't interfere with recording
+POST {{baseUrl}}/cameras/camera1/start-stream
+Content-Type: application/json
+
+### Expected Response:
+# {
+# "success": true,
+# "message": "Started streaming for camera camera1"
+# }
+
+###
+
+### Stop camera streaming
+POST {{baseUrl}}/cameras/camera1/stop-stream
+Content-Type: application/json
+
+### Expected Response:
+# {
+# "success": true,
+# "message": "Stopped streaming for camera camera1"
+# }
+
+###
+
+### Get live MJPEG stream (open in browser or use as img src)
+### This endpoint returns a continuous MJPEG stream
+### Content-Type: multipart/x-mixed-replace; boundary=frame
+GET {{baseUrl}}/cameras/camera1/stream
+
+### Usage in HTML:
+# <img src="{{baseUrl}}/cameras/camera1/stream" alt="camera1 live stream" />
+
+### Usage in React:
+# <img src={`${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`} alt="Live stream" />
+
+###
+
+### Start streaming for camera2
+POST {{baseUrl}}/cameras/camera2/start-stream
+Content-Type: application/json
+
+###
+
+### Get live stream for camera2
+GET {{baseUrl}}/cameras/camera2/stream
+
+###
+
+### Stop streaming for camera2
+POST {{baseUrl}}/cameras/camera2/stop-stream
+Content-Type: application/json
+
+### =============================================================================
+### CONCURRENT OPERATIONS TESTING
+### =============================================================================
+
+### Test Scenario: Streaming + Recording Simultaneously
+### This demonstrates that streaming doesn't block recording
+
+### Step 1: Start streaming first
+POST {{baseUrl}}/cameras/camera1/start-stream
+Content-Type: application/json
+
+###
+
+### Step 2: Start recording (while streaming continues)
+POST {{baseUrl}}/cameras/camera1/start-recording
+Content-Type: application/json
+
+{
+ "filename": "concurrent_test.avi"
+}
+
+###
+
+### Step 3: Check both are running
+GET {{baseUrl}}/cameras/camera1
+
+### Expected Response shows both recording and streaming active:
+# {
+# "camera1": {
+# "name": "camera1",
+# "status": "connected",
+# "is_recording": true,
+# "current_recording_file": "concurrent_test.avi",
+# "recording_start_time": "2025-01-28T10:30:00.000Z"
+# }
+# }
+
+###
+
+### Step 4: Stop recording (streaming continues)
+POST {{baseUrl}}/cameras/camera1/stop-recording
+Content-Type: application/json
+
+###
+
+### Step 5: Verify streaming still works
+GET {{baseUrl}}/cameras/camera1/stream
+
+###
+
+### Step 6: Stop streaming
+POST {{baseUrl}}/cameras/camera1/stop-stream
+Content-Type: application/json
+
+### =============================================================================
+### MULTIPLE CAMERA STREAMING
+### =============================================================================
+
+### Start streaming on multiple cameras simultaneously
+POST {{baseUrl}}/cameras/camera1/start-stream
+Content-Type: application/json
+
+###
+
+POST {{baseUrl}}/cameras/camera2/start-stream
+Content-Type: application/json
+
+###
+
+### Check status of all cameras
+GET {{baseUrl}}/cameras
+
+###
+
+### Access multiple streams (open in separate browser tabs)
+GET {{baseUrl}}/cameras/camera1/stream
+
+###
+
+GET {{baseUrl}}/cameras/camera2/stream
+
+###
+
+### Stop all streaming
+POST {{baseUrl}}/cameras/camera1/stop-stream
+Content-Type: application/json
+
+###
+
+POST {{baseUrl}}/cameras/camera2/stop-stream
+Content-Type: application/json
+
+### =============================================================================
+### ERROR TESTING
+### =============================================================================
+
+### Test with invalid camera name
+POST {{baseUrl}}/cameras/invalid_camera/start-stream
+Content-Type: application/json
+
+### Expected Response:
+# {
+# "detail": "Camera streamer not found: invalid_camera"
+# }
+
+###
+
+### Test stream endpoint without starting stream first
+GET {{baseUrl}}/cameras/camera1/stream
+
+### Expected: May return error or empty stream depending on camera state
+
+###
+
+### Test starting stream when camera is in error state
+POST {{baseUrl}}/cameras/camera1/start-stream
+Content-Type: application/json
+
+### If camera has issues, expected response:
+# {
+# "success": false,
+# "message": "Failed to start streaming for camera camera1"
+# }
+
+### =============================================================================
+### INTEGRATION EXAMPLES FOR AI ASSISTANTS
+### =============================================================================
+
+### React Component Integration:
+# const CameraStream = ({ cameraName }) => {
+# const [isStreaming, setIsStreaming] = useState(false);
+#
+# const startStream = async () => {
+# const response = await fetch(`${baseUrl}/cameras/${cameraName}/start-stream`, {
+# method: 'POST'
+# });
+# if (response.ok) {
+# setIsStreaming(true);
+# }
+# };
+#
+#   return (
+#     <div>
+#       <button onClick={startStream}>Start Stream</button>
+#       {isStreaming && (
+#         <img src={`${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`} alt="Live stream" />
+#       )}
+#     </div>
+#   );
+# };
+
+### JavaScript Fetch Example:
+# const streamAPI = {
+# async startStream(cameraName) {
+# const response = await fetch(`${baseUrl}/cameras/${cameraName}/start-stream`, {
+# method: 'POST',
+# headers: { 'Content-Type': 'application/json' }
+# });
+# return response.json();
+# },
+#
+# async stopStream(cameraName) {
+# const response = await fetch(`${baseUrl}/cameras/${cameraName}/stop-stream`, {
+# method: 'POST',
+# headers: { 'Content-Type': 'application/json' }
+# });
+# return response.json();
+# },
+#
+# getStreamUrl(cameraName) {
+# return `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`;
+# }
+# };
+
+### Vue.js Integration:
+# <template>
+#   <div>
+#     <button @click="startStream">Start Stream</button>
+#     <img v-if="isStreaming" :src="streamUrl" alt="Live camera stream" />
+#   </div>
+# </template>
+#
+# <script>
+# // streamUrl = `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`
+# </script>
+
+### =============================================================================
+### TROUBLESHOOTING
+### =============================================================================
+
+### If streams don't start:
+# 1. Check camera status: GET /cameras
+# 2. Verify system health: GET /health
+# 3. Test camera connection: POST /cameras/{name}/test-connection
+# 4. Check if camera is already recording (shouldn't matter, but good to know)
+
+### If stream image doesn't load:
+# 1. Verify stream was started: POST /cameras/{name}/start-stream
+# 2. Check browser console for CORS errors
+# 3. Try accessing stream URL directly in browser
+# 4. Add timestamp to prevent caching: ?t=${Date.now()}
+
+### If concurrent operations fail:
+# 1. This should work - streaming and recording use separate connections
+# 2. Check system logs for resource conflicts
+# 3. Verify sufficient system resources (CPU/Memory)
+# 4. Test with one camera first, then multiple
+
+### Performance Notes:
+# - Streaming uses ~10 FPS by default (configurable)
+# - JPEG quality set to 70% (configurable)
+# - Each stream uses additional CPU/memory
+# - Multiple concurrent streams may impact performance
+
+### =============================================================================
+### CAMERA CONFIGURATION ENDPOINTS (NEW)
+### =============================================================================
+
+### Get camera configuration
+GET {{baseUrl}}/cameras/camera1/config
+
+### Expected Response:
+# {
+# "name": "camera1",
+# "machine_topic": "vibratory_conveyor",
+# "storage_path": "/storage/camera1",
+# "enabled": true,
+# "exposure_ms": 1.0,
+# "gain": 3.5,
+# "target_fps": 0,
+# "sharpness": 120,
+# "contrast": 110,
+# "saturation": 100,
+# "gamma": 100,
+# "noise_filter_enabled": true,
+# "denoise_3d_enabled": false,
+# "auto_white_balance": true,
+# "color_temperature_preset": 0,
+# "anti_flicker_enabled": true,
+# "light_frequency": 1,
+# "bit_depth": 8,
+# "hdr_enabled": false,
+# "hdr_gain_mode": 0
+# }
+
+###
+
+### Update basic camera settings (real-time, no restart required)
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "exposure_ms": 2.0,
+ "gain": 4.0,
+ "target_fps": 10.0
+}
+
+###
+
+### Update image quality settings
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "sharpness": 150,
+ "contrast": 120,
+ "saturation": 110,
+ "gamma": 90
+}
+
+###
+
+### Update advanced settings
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "anti_flicker_enabled": true,
+ "light_frequency": 1,
+ "auto_white_balance": false,
+ "color_temperature_preset": 2
+}
+
+###
+
+### Enable HDR mode
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "hdr_enabled": true,
+ "hdr_gain_mode": 1
+}
+
+###
+
+### Update noise reduction settings (requires restart)
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "noise_filter_enabled": false,
+ "denoise_3d_enabled": true
+}
+
+###
+
+### Apply configuration (restart camera with new settings)
+POST {{baseUrl}}/cameras/camera1/apply-config
+
+### Expected Response:
+# {
+# "success": true,
+# "message": "Configuration applied to camera camera1"
+# }
+
+###
+
+### Get camera2 configuration
+GET {{baseUrl}}/cameras/camera2/config
+
+###
+
+### Update camera2 for outdoor lighting
+PUT {{baseUrl}}/cameras/camera2/config
+Content-Type: application/json
+
+{
+ "exposure_ms": 0.5,
+ "gain": 2.0,
+ "sharpness": 130,
+ "contrast": 115,
+ "anti_flicker_enabled": true,
+ "light_frequency": 1
+}
+
+### =============================================================================
+### CONFIGURATION TESTING SCENARIOS
+### =============================================================================
+
+### Scenario 1: Low light optimization
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "exposure_ms": 5.0,
+ "gain": 8.0,
+ "noise_filter_enabled": true,
+ "denoise_3d_enabled": true
+}
+
+###
+
+### Scenario 2: High speed capture
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "exposure_ms": 0.2,
+ "gain": 1.0,
+ "target_fps": 30.0,
+ "sharpness": 180
+}
+
+###
+
+### Scenario 3: Color accuracy for food inspection
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "auto_white_balance": false,
+ "color_temperature_preset": 1,
+ "saturation": 120,
+ "contrast": 105,
+ "gamma": 95
+}
+
+###
+
+### Scenario 4: HDR for high contrast scenes
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "hdr_enabled": true,
+ "hdr_gain_mode": 2,
+ "exposure_ms": 1.0,
+ "gain": 3.0
+}
+
+### =============================================================================
+### ERROR TESTING FOR CONFIGURATION
+### =============================================================================
+
+### Test invalid camera name
+GET {{baseUrl}}/cameras/invalid_camera/config
+
+###
+
+### Test invalid exposure range
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "exposure_ms": 2000.0
+}
+
+### Expected: HTTP 422 validation error
+
+###
+
+### Test invalid gain range
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{
+ "gain": 50.0
+}
+
+### Expected: HTTP 422 validation error
+
+###
+
+### Test empty configuration update
+PUT {{baseUrl}}/cameras/camera1/config
+Content-Type: application/json
+
+{}
+
+### Expected: HTTP 400 "No configuration updates provided"
diff --git a/test_frame_conversion.py b/test_frame_conversion.py
new file mode 100644
index 0000000..3f25385
--- /dev/null
+++ b/test_frame_conversion.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python3
+"""
+Test script to verify the frame conversion fix works correctly.
+"""
+
+import sys
+import os
+import numpy as np
+
+# Add the current directory to Python path
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+# Add camera SDK to path
+sys.path.append(os.path.join(os.path.dirname(__file__), "camera_sdk"))
+
+try:
+ import mvsdk
+    print("✅ mvsdk imported successfully")
+except ImportError as e:
+    print(f"❌ Failed to import mvsdk: {e}")
+ sys.exit(1)
+
+def test_frame_conversion():
+ """Test the frame conversion logic"""
+    print("🧪 Testing frame conversion logic...")
+
+ # Simulate frame data
+ width, height = 640, 480
+ frame_size = width * height * 3 # RGB
+
+ # Create mock frame data
+ mock_frame_data = np.random.randint(0, 255, frame_size, dtype=np.uint8)
+
+ # Create a mock frame buffer (simulate memory address)
+ frame_buffer = mock_frame_data.ctypes.data
+
+ # Create mock FrameHead
+ class MockFrameHead:
+ def __init__(self):
+ self.iWidth = width
+ self.iHeight = height
+ self.uBytes = frame_size
+
+ frame_head = MockFrameHead()
+
+ try:
+ # Test the conversion logic (similar to what's in streamer.py)
+ frame_data_buffer = (mvsdk.c_ubyte * frame_head.uBytes).from_address(frame_buffer)
+ frame_data = np.frombuffer(frame_data_buffer, dtype=np.uint8)
+ frame = frame_data.reshape((frame_head.iHeight, frame_head.iWidth, 3))
+
+        print("✅ Frame conversion successful!")
+ print(f" Frame shape: {frame.shape}")
+ print(f" Frame dtype: {frame.dtype}")
+ print(f" Frame size: {frame.size} bytes")
+
+ return True
+
+ except Exception as e:
+        print(f"❌ Frame conversion failed: {e}")
+ return False
+
+def main():
+    print("🔧 Frame Conversion Test")
+ print("=" * 40)
+
+ success = test_frame_conversion()
+
+ if success:
+        print("\n✅ Frame conversion fix is working correctly!")
+        print("The streaming issue should be resolved after system restart.")
+ else:
+        print("\n❌ Frame conversion fix needs more work.")
+
+    print("\n💡 To apply the fix:")
+ print("1. Restart the USDA vision system")
+ print("2. Test streaming again")
+
+if __name__ == "__main__":
+ main()
diff --git a/test_streaming.py b/test_streaming.py
new file mode 100644
index 0000000..47672ec
--- /dev/null
+++ b/test_streaming.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python3
+"""
+Test script for camera streaming functionality.
+
+This script tests the new streaming capabilities without interfering with recording.
+"""
+
+import sys
+import os
+import time
+import requests
+import threading
+from datetime import datetime
+
+# Add the current directory to Python path
+sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
+
+def test_api_endpoints():
+ """Test the streaming API endpoints"""
+ base_url = "http://localhost:8000"
+
+    print("🧪 Testing Camera Streaming API Endpoints")
+ print("=" * 50)
+
+ # Test system status
+ try:
+ response = requests.get(f"{base_url}/system/status", timeout=5)
+ if response.status_code == 200:
+ print("โ
System status endpoint working")
+ data = response.json()
+ print(f" System: {data.get('status', 'Unknown')}")
+ print(f" Camera Manager: {'Running' if data.get('camera_manager_running') else 'Stopped'}")
+ else:
+ print(f"โ System status endpoint failed: {response.status_code}")
+ except Exception as e:
+ print(f"โ System status endpoint error: {e}")
+
+ # Test camera list
+ try:
+ response = requests.get(f"{base_url}/cameras", timeout=5)
+ if response.status_code == 200:
+ print("โ
Camera list endpoint working")
+ cameras = response.json()
+ print(f" Found {len(cameras)} cameras: {list(cameras.keys())}")
+
+ # Test streaming for each camera
+ for camera_name in cameras.keys():
+ test_camera_streaming(base_url, camera_name)
+
+ else:
+ print(f"โ Camera list endpoint failed: {response.status_code}")
+ except Exception as e:
+ print(f"โ Camera list endpoint error: {e}")
+
+def test_camera_streaming(base_url, camera_name):
+ """Test streaming for a specific camera"""
+ print(f"\n๐ฅ Testing streaming for {camera_name}")
+ print("-" * 30)
+
+ # Test start streaming
+ try:
+ response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
+ if response.status_code == 200:
+ print(f"โ
Start stream endpoint working for {camera_name}")
+ data = response.json()
+ print(f" Response: {data.get('message', 'No message')}")
+ else:
+ print(f"โ Start stream failed for {camera_name}: {response.status_code}")
+ print(f" Error: {response.text}")
+ return
+ except Exception as e:
+ print(f"โ Start stream error for {camera_name}: {e}")
+ return
+
+ # Wait a moment for stream to initialize
+ time.sleep(2)
+
+ # Test stream endpoint (just check if it responds)
+ try:
+ response = requests.get(f"{base_url}/cameras/{camera_name}/stream", timeout=5, stream=True)
+ if response.status_code == 200:
+ print(f"โ
Stream endpoint responding for {camera_name}")
+ print(f" Content-Type: {response.headers.get('content-type', 'Unknown')}")
+
+ # Read a small amount of data to verify it's working
+ chunk_count = 0
+ for chunk in response.iter_content(chunk_size=1024):
+ chunk_count += 1
+ if chunk_count >= 3: # Read a few chunks then stop
+ break
+
+ print(f" Received {chunk_count} data chunks")
+ else:
+ print(f"โ Stream endpoint failed for {camera_name}: {response.status_code}")
+ except Exception as e:
+ print(f"โ Stream endpoint error for {camera_name}: {e}")
+
+ # Test stop streaming
+ try:
+ response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
+ if response.status_code == 200:
+ print(f"โ
Stop stream endpoint working for {camera_name}")
+ data = response.json()
+ print(f" Response: {data.get('message', 'No message')}")
+ else:
+ print(f"โ Stop stream failed for {camera_name}: {response.status_code}")
+ except Exception as e:
+ print(f"โ Stop stream error for {camera_name}: {e}")
+
+def test_concurrent_recording_and_streaming():
+ """Test that streaming doesn't interfere with recording"""
+ base_url = "http://localhost:8000"
+
+ print("\n๐ Testing Concurrent Recording and Streaming")
+ print("=" * 50)
+
+ try:
+ # Get available cameras
+ response = requests.get(f"{base_url}/cameras", timeout=5)
+ if response.status_code != 200:
+ print("โ Cannot get camera list for concurrent test")
+ return
+
+ cameras = response.json()
+ if not cameras:
+ print("โ No cameras available for concurrent test")
+ return
+
+ camera_name = list(cameras.keys())[0] # Use first camera
+ print(f"Using camera: {camera_name}")
+
+ # Start streaming
+ print("1. Starting streaming...")
+ response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
+ if response.status_code != 200:
+ print(f"โ Failed to start streaming: {response.text}")
+ return
+
+ time.sleep(2)
+
+ # Start recording
+ print("2. Starting recording...")
+ response = requests.post(f"{base_url}/cameras/{camera_name}/start-recording",
+ json={"filename": "test_concurrent_recording.avi"}, timeout=10)
+ if response.status_code == 200:
+ print("โ
Recording started successfully while streaming")
+ else:
+ print(f"โ Failed to start recording while streaming: {response.text}")
+
+ # Let both run for a few seconds
+ print("3. Running both streaming and recording for 5 seconds...")
+ time.sleep(5)
+
+ # Stop recording
+ print("4. Stopping recording...")
+ response = requests.post(f"{base_url}/cameras/{camera_name}/stop-recording", timeout=5)
+ if response.status_code == 200:
+ print("โ
Recording stopped successfully")
+ else:
+ print(f"โ Failed to stop recording: {response.text}")
+
+ # Stop streaming
+ print("5. Stopping streaming...")
+ response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
+ if response.status_code == 200:
+ print("โ
Streaming stopped successfully")
+ else:
+ print(f"โ Failed to stop streaming: {response.text}")
+
+ print("โ
Concurrent test completed successfully!")
+
+ except Exception as e:
+ print(f"โ Concurrent test error: {e}")
+
+def main():
+ """Main test function"""
+ print("๐ USDA Vision Camera Streaming Test")
+ print("=" * 50)
+ print(f"Test started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
+ print()
+
+ # Wait for system to be ready
+ print("โณ Waiting for system to be ready...")
+ time.sleep(3)
+
+ # Run tests
+ test_api_endpoints()
+ test_concurrent_recording_and_streaming()
+
+ print("\n" + "=" * 50)
+ print("๐ Test completed!")
+ print("\n๐ Next Steps:")
+ print("1. Open camera_preview.html in your browser")
+ print("2. Click 'Start Stream' for any camera")
+ print("3. Verify live preview works without blocking recording")
+ print("4. Test concurrent recording and streaming")
+
+if __name__ == "__main__":
+ main()
diff --git a/usda_vision_system/api/__pycache__/models.cpython-311.pyc b/usda_vision_system/api/__pycache__/models.cpython-311.pyc
index 99a7af9..e7d53f0 100644
Binary files a/usda_vision_system/api/__pycache__/models.cpython-311.pyc and b/usda_vision_system/api/__pycache__/models.cpython-311.pyc differ
diff --git a/usda_vision_system/api/__pycache__/server.cpython-311.pyc b/usda_vision_system/api/__pycache__/server.cpython-311.pyc
index 9b983de..dd12bf2 100644
Binary files a/usda_vision_system/api/__pycache__/server.cpython-311.pyc and b/usda_vision_system/api/__pycache__/server.cpython-311.pyc differ
diff --git a/usda_vision_system/api/models.py b/usda_vision_system/api/models.py
index 02906b4..02b95ea 100644
--- a/usda_vision_system/api/models.py
+++ b/usda_vision_system/api/models.py
@@ -81,6 +81,74 @@ class StartRecordingRequest(BaseModel):
fps: Optional[float] = Field(default=None, description="Target frames per second")
+class CameraConfigRequest(BaseModel):
+ """Camera configuration update request model"""
+
+ # Basic settings
+ exposure_ms: Optional[float] = Field(default=None, ge=0.1, le=1000.0, description="Exposure time in milliseconds")
+ gain: Optional[float] = Field(default=None, ge=0.0, le=20.0, description="Camera gain value")
+ target_fps: Optional[float] = Field(default=None, ge=0.0, le=120.0, description="Target frames per second")
+
+ # Image Quality Settings
+ sharpness: Optional[int] = Field(default=None, ge=0, le=200, description="Sharpness (0-200, default 100)")
+ contrast: Optional[int] = Field(default=None, ge=0, le=200, description="Contrast (0-200, default 100)")
+ saturation: Optional[int] = Field(default=None, ge=0, le=200, description="Saturation (0-200, default 100)")
+ gamma: Optional[int] = Field(default=None, ge=0, le=300, description="Gamma (0-300, default 100)")
+
+ # Noise Reduction
+ noise_filter_enabled: Optional[bool] = Field(default=None, description="Enable basic noise filtering")
+ denoise_3d_enabled: Optional[bool] = Field(default=None, description="Enable advanced 3D denoising")
+
+ # Color Settings (for color cameras)
+ auto_white_balance: Optional[bool] = Field(default=None, description="Enable automatic white balance")
+ color_temperature_preset: Optional[int] = Field(default=None, ge=0, le=10, description="Color temperature preset")
+
+ # Advanced Settings
+ anti_flicker_enabled: Optional[bool] = Field(default=None, description="Reduce artificial lighting flicker")
+ light_frequency: Optional[int] = Field(default=None, ge=0, le=1, description="Light frequency (0=50Hz, 1=60Hz)")
+
+ # HDR Settings
+ hdr_enabled: Optional[bool] = Field(default=None, description="Enable High Dynamic Range")
+ hdr_gain_mode: Optional[int] = Field(default=None, ge=0, le=3, description="HDR processing mode")
+
+
+class CameraConfigResponse(BaseModel):
+ """Camera configuration response model"""
+
+ name: str
+ machine_topic: str
+ storage_path: str
+ enabled: bool
+
+ # Basic settings
+ exposure_ms: float
+ gain: float
+ target_fps: float
+
+ # Image Quality Settings
+ sharpness: int
+ contrast: int
+ saturation: int
+ gamma: int
+
+ # Noise Reduction
+ noise_filter_enabled: bool
+ denoise_3d_enabled: bool
+
+ # Color Settings
+ auto_white_balance: bool
+ color_temperature_preset: int
+
+ # Advanced Settings
+ anti_flicker_enabled: bool
+ light_frequency: int
+ bit_depth: int
+
+ # HDR Settings
+ hdr_enabled: bool
+ hdr_gain_mode: int
+
+
class StartRecordingResponse(BaseModel):
"""Start recording response model"""
diff --git a/usda_vision_system/api/server.py b/usda_vision_system/api/server.py
index 028a596..3fcd136 100644
--- a/usda_vision_system/api/server.py
+++ b/usda_vision_system/api/server.py
@@ -13,7 +13,7 @@ import threading
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Depends, Query
from fastapi.middleware.cors import CORSMiddleware
-from fastapi.responses import JSONResponse
+from fastapi.responses import JSONResponse, StreamingResponse
import uvicorn
from ..core.config import Config
@@ -243,6 +243,149 @@ class APIServer:
self.logger.error(f"Error testing camera connection: {e}")
raise HTTPException(status_code=500, detail=str(e))
+ @self.app.get("/cameras/{camera_name}/stream")
+ async def camera_stream(camera_name: str):
+ """Get live MJPEG stream from camera"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ # Get camera streamer
+ streamer = self.camera_manager.get_camera_streamer(camera_name)
+ if not streamer:
+ raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found")
+
+ # Start streaming if not already active
+ if not streamer.is_streaming():
+ success = streamer.start_streaming()
+ if not success:
+ raise HTTPException(status_code=500, detail=f"Failed to start streaming for camera {camera_name}")
+
+ # Return MJPEG stream
+ return StreamingResponse(streamer.get_frame_generator(), media_type="multipart/x-mixed-replace; boundary=frame")
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ self.logger.error(f"Error starting camera stream: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @self.app.post("/cameras/{camera_name}/start-stream")
+ async def start_camera_stream(camera_name: str):
+ """Start streaming for a camera"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ success = self.camera_manager.start_camera_streaming(camera_name)
+ if success:
+ return {"success": True, "message": f"Started streaming for camera {camera_name}"}
+ else:
+ return {"success": False, "message": f"Failed to start streaming for camera {camera_name}"}
+
+ except Exception as e:
+ self.logger.error(f"Error starting camera stream: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @self.app.post("/cameras/{camera_name}/stop-stream")
+ async def stop_camera_stream(camera_name: str):
+ """Stop streaming for a camera"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ success = self.camera_manager.stop_camera_streaming(camera_name)
+ if success:
+ return {"success": True, "message": f"Stopped streaming for camera {camera_name}"}
+ else:
+ return {"success": False, "message": f"Failed to stop streaming for camera {camera_name}"}
+
+ except Exception as e:
+ self.logger.error(f"Error stopping camera stream: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @self.app.get("/cameras/{camera_name}/config", response_model=CameraConfigResponse)
+ async def get_camera_config(camera_name: str):
+ """Get camera configuration"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ config = self.camera_manager.get_camera_config(camera_name)
+ if not config:
+ raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found")
+
+ return CameraConfigResponse(
+ name=config.name,
+ machine_topic=config.machine_topic,
+ storage_path=config.storage_path,
+ enabled=config.enabled,
+ exposure_ms=config.exposure_ms,
+ gain=config.gain,
+ target_fps=config.target_fps,
+ sharpness=config.sharpness,
+ contrast=config.contrast,
+ saturation=config.saturation,
+ gamma=config.gamma,
+ noise_filter_enabled=config.noise_filter_enabled,
+ denoise_3d_enabled=config.denoise_3d_enabled,
+ auto_white_balance=config.auto_white_balance,
+ color_temperature_preset=config.color_temperature_preset,
+ anti_flicker_enabled=config.anti_flicker_enabled,
+ light_frequency=config.light_frequency,
+ bit_depth=config.bit_depth,
+ hdr_enabled=config.hdr_enabled,
+ hdr_gain_mode=config.hdr_gain_mode,
+ )
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ self.logger.error(f"Error getting camera config: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @self.app.put("/cameras/{camera_name}/config")
+ async def update_camera_config(camera_name: str, request: CameraConfigRequest):
+ """Update camera configuration"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ # Convert request to dict, excluding None values
+ config_updates = {k: v for k, v in request.dict().items() if v is not None}
+
+ if not config_updates:
+ raise HTTPException(status_code=400, detail="No configuration updates provided")
+
+ success = self.camera_manager.update_camera_config(camera_name, **config_updates)
+ if success:
+ return {"success": True, "message": f"Camera {camera_name} configuration updated", "updated_settings": list(config_updates.keys())}
+ else:
+ raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found or update failed")
+
+ except HTTPException:
+ raise
+ except Exception as e:
+ self.logger.error(f"Error updating camera config: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+ @self.app.post("/cameras/{camera_name}/apply-config")
+ async def apply_camera_config(camera_name: str):
+ """Apply current configuration to active camera (requires camera restart)"""
+ try:
+ if not self.camera_manager:
+ raise HTTPException(status_code=503, detail="Camera manager not available")
+
+ success = self.camera_manager.apply_camera_config(camera_name)
+ if success:
+ return {"success": True, "message": f"Configuration applied to camera {camera_name}"}
+ else:
+ return {"success": False, "message": f"Failed to apply configuration to camera {camera_name}"}
+
+ except Exception as e:
+ self.logger.error(f"Error applying camera config: {e}")
+ raise HTTPException(status_code=500, detail=str(e))
+
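+        # Example requests for the configuration endpoints defined above
+        # (illustrative values; adjust the camera name to the deployment):
+        #   GET  /cameras/camera1/config
+        #   PUT  /cameras/camera1/config         {"exposure_ms": 2.5, "gain": 3.0}
+        #   POST /cameras/camera1/apply-config
+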
@self.app.post("/cameras/{camera_name}/reconnect", response_model=CameraRecoveryResponse)
async def reconnect_camera(camera_name: str):
"""Reconnect to a camera"""
diff --git a/usda_vision_system/camera/__init__.py b/usda_vision_system/camera/__init__.py
index df9d25f..7b737e0 100644
--- a/usda_vision_system/camera/__init__.py
+++ b/usda_vision_system/camera/__init__.py
@@ -8,5 +8,6 @@ using the camera SDK library (mvsdk).
from .manager import CameraManager
from .recorder import CameraRecorder
from .monitor import CameraMonitor
+from .streamer import CameraStreamer
-__all__ = ["CameraManager", "CameraRecorder", "CameraMonitor"]
+__all__ = ["CameraManager", "CameraRecorder", "CameraMonitor", "CameraStreamer"]
diff --git a/usda_vision_system/camera/__pycache__/__init__.cpython-311.pyc b/usda_vision_system/camera/__pycache__/__init__.cpython-311.pyc
index 6e8702c..2e2f366 100644
Binary files a/usda_vision_system/camera/__pycache__/__init__.cpython-311.pyc and b/usda_vision_system/camera/__pycache__/__init__.cpython-311.pyc differ
diff --git a/usda_vision_system/camera/__pycache__/manager.cpython-311.pyc b/usda_vision_system/camera/__pycache__/manager.cpython-311.pyc
index 22c54b7..4755cf0 100644
Binary files a/usda_vision_system/camera/__pycache__/manager.cpython-311.pyc and b/usda_vision_system/camera/__pycache__/manager.cpython-311.pyc differ
diff --git a/usda_vision_system/camera/__pycache__/recorder.cpython-311.pyc b/usda_vision_system/camera/__pycache__/recorder.cpython-311.pyc
index 2ca6a05..61c2c78 100644
Binary files a/usda_vision_system/camera/__pycache__/recorder.cpython-311.pyc and b/usda_vision_system/camera/__pycache__/recorder.cpython-311.pyc differ
diff --git a/usda_vision_system/camera/manager.py b/usda_vision_system/camera/manager.py
index 84def7b..b0c4b9d 100644
--- a/usda_vision_system/camera/manager.py
+++ b/usda_vision_system/camera/manager.py
@@ -22,6 +22,7 @@ from ..core.events import EventSystem, EventType, Event, publish_camera_status_c
from ..core.timezone_utils import format_filename_timestamp
from .recorder import CameraRecorder
from .monitor import CameraMonitor
+from .streamer import CameraStreamer
from .sdk_config import initialize_sdk_with_suppression
@@ -40,6 +41,7 @@ class CameraManager:
# Camera management
self.available_cameras: List[Any] = [] # mvsdk camera device info
self.camera_recorders: Dict[str, CameraRecorder] = {} # camera_name -> recorder
+ self.camera_streamers: Dict[str, CameraStreamer] = {} # camera_name -> streamer
self.camera_monitor: Optional[CameraMonitor] = None
# Threading
@@ -71,6 +73,9 @@ class CameraManager:
# Initialize camera recorders
self._initialize_recorders()
+ # Initialize camera streamers
+ self._initialize_streamers()
+
self.logger.info("Camera manager started successfully")
return True
@@ -93,6 +98,12 @@ class CameraManager:
recorder.stop_recording()
recorder.cleanup()
+ # Stop all active streaming
+ with self._lock:
+ for streamer in self.camera_streamers.values():
+ if streamer.is_streaming():
+ streamer.stop_streaming()
+
self.logger.info("Camera manager stopped")
def _discover_cameras(self) -> None:
@@ -427,3 +438,104 @@ class CameraManager:
self.logger.error(f"Error reinitializing camera {camera_name}: {e}")
self.state_manager.update_camera_status(name=camera_name, status="error", device_info={"error": str(e)})
return False
+
+ def _initialize_streamers(self) -> None:
+ """Initialize camera streamers for configured cameras"""
+ with self._lock:
+ for camera_config in self.config.cameras:
+ if not camera_config.enabled:
+ continue
+
+ try:
+ # Find matching physical camera
+ device_info = self._find_camera_device(camera_config.name)
+ if device_info is None:
+ self.logger.warning(f"No physical camera found for streaming: {camera_config.name}")
+ continue
+
+ # Create streamer
+ streamer = CameraStreamer(camera_config=camera_config, device_info=device_info, state_manager=self.state_manager, event_system=self.event_system)
+
+ # Add streamer to the list
+ self.camera_streamers[camera_config.name] = streamer
+ self.logger.info(f"Successfully created streamer for camera: {camera_config.name}")
+
+ except Exception as e:
+ self.logger.error(f"Error initializing streamer for {camera_config.name}: {e}")
+
+ def get_camera_streamer(self, camera_name: str) -> Optional[CameraStreamer]:
+ """Get camera streamer for a specific camera"""
+ return self.camera_streamers.get(camera_name)
+
+ def start_camera_streaming(self, camera_name: str) -> bool:
+ """Start streaming for a specific camera"""
+ streamer = self.camera_streamers.get(camera_name)
+ if not streamer:
+ self.logger.error(f"Camera streamer not found: {camera_name}")
+ return False
+
+ return streamer.start_streaming()
+
+ def stop_camera_streaming(self, camera_name: str) -> bool:
+ """Stop streaming for a specific camera"""
+ streamer = self.camera_streamers.get(camera_name)
+ if not streamer:
+ self.logger.error(f"Camera streamer not found: {camera_name}")
+ return False
+
+ return streamer.stop_streaming()
+
+ def is_camera_streaming(self, camera_name: str) -> bool:
+ """Check if a camera is currently streaming"""
+ streamer = self.camera_streamers.get(camera_name)
+ if not streamer:
+ return False
+
+ return streamer.is_streaming()
+
+ def get_camera_config(self, camera_name: str) -> Optional[CameraConfig]:
+ """Get camera configuration"""
+ return self.config.get_camera_by_name(camera_name)
+
+ def update_camera_config(self, camera_name: str, **kwargs) -> bool:
+ """Update camera configuration and save to config file"""
+ try:
+ # Update the configuration
+ success = self.config.update_camera_config(camera_name, **kwargs)
+ if success:
+ self.logger.info(f"Updated configuration for camera {camera_name}: {kwargs}")
+ return True
+ else:
+ self.logger.error(f"Failed to update configuration for camera {camera_name}")
+ return False
+ except Exception as e:
+ self.logger.error(f"Error updating camera configuration: {e}")
+ return False
+
+ def apply_camera_config(self, camera_name: str) -> bool:
+ """Apply current configuration to active camera (requires camera restart)"""
+ try:
+ # Get the recorder for this camera
+ recorder = self.camera_recorders.get(camera_name)
+ if not recorder:
+ self.logger.error(f"Camera recorder not found: {camera_name}")
+ return False
+
+ # Stop recording if active
+ was_recording = recorder.is_recording()
+ if was_recording:
+ recorder.stop_recording()
+
+ # Reinitialize the camera with new settings
+ success = self.reinitialize_failed_camera(camera_name)
+
+ if success:
+ self.logger.info(f"Successfully applied configuration to camera {camera_name}")
+ return True
+ else:
+ self.logger.error(f"Failed to apply configuration to camera {camera_name}")
+ return False
+
+ except Exception as e:
+ self.logger.error(f"Error applying camera configuration: {e}")
+ return False
diff --git a/usda_vision_system/camera/recorder.py b/usda_vision_system/camera/recorder.py
index 187754f..ea91753 100644
--- a/usda_vision_system/camera/recorder.py
+++ b/usda_vision_system/camera/recorder.py
@@ -328,6 +328,117 @@ class CameraRecorder:
self.logger.error(f"Error updating camera settings: {e}")
return False
+ def update_advanced_camera_settings(self, **kwargs) -> bool:
+ """Update advanced camera settings dynamically"""
+ if not self.hCamera:
+ self.logger.error("Camera not initialized")
+ return False
+
+ try:
+ settings_updated = False
+
+ # Update basic settings
+ if "exposure_ms" in kwargs and kwargs["exposure_ms"] is not None:
+ mvsdk.CameraSetAeState(self.hCamera, 0)
+ exposure_us = int(kwargs["exposure_ms"] * 1000)
+ mvsdk.CameraSetExposureTime(self.hCamera, exposure_us)
+ self.camera_config.exposure_ms = kwargs["exposure_ms"]
+ settings_updated = True
+
+ if "gain" in kwargs and kwargs["gain"] is not None:
+ gain_value = int(kwargs["gain"] * 100)
+ mvsdk.CameraSetAnalogGain(self.hCamera, gain_value)
+ self.camera_config.gain = kwargs["gain"]
+ settings_updated = True
+
+ if "target_fps" in kwargs and kwargs["target_fps"] is not None:
+ self.camera_config.target_fps = kwargs["target_fps"]
+ settings_updated = True
+
+ # Update image quality settings
+ if "sharpness" in kwargs and kwargs["sharpness"] is not None:
+ mvsdk.CameraSetSharpness(self.hCamera, kwargs["sharpness"])
+ self.camera_config.sharpness = kwargs["sharpness"]
+ settings_updated = True
+
+ if "contrast" in kwargs and kwargs["contrast"] is not None:
+ mvsdk.CameraSetContrast(self.hCamera, kwargs["contrast"])
+ self.camera_config.contrast = kwargs["contrast"]
+ settings_updated = True
+
+ if "gamma" in kwargs and kwargs["gamma"] is not None:
+ mvsdk.CameraSetGamma(self.hCamera, kwargs["gamma"])
+ self.camera_config.gamma = kwargs["gamma"]
+ settings_updated = True
+
+ if "saturation" in kwargs and kwargs["saturation"] is not None and not self.monoCamera:
+ mvsdk.CameraSetSaturation(self.hCamera, kwargs["saturation"])
+ self.camera_config.saturation = kwargs["saturation"]
+ settings_updated = True
+
+ # Update noise reduction settings
+ if "noise_filter_enabled" in kwargs and kwargs["noise_filter_enabled"] is not None:
+ # Note: Noise filter settings may require camera restart to take effect
+ self.camera_config.noise_filter_enabled = kwargs["noise_filter_enabled"]
+ settings_updated = True
+
+ if "denoise_3d_enabled" in kwargs and kwargs["denoise_3d_enabled"] is not None:
+ # Note: 3D denoise settings may require camera restart to take effect
+ self.camera_config.denoise_3d_enabled = kwargs["denoise_3d_enabled"]
+ settings_updated = True
+
+ # Update color settings (for color cameras)
+ if not self.monoCamera:
+ if "auto_white_balance" in kwargs and kwargs["auto_white_balance"] is not None:
+ mvsdk.CameraSetWbMode(self.hCamera, kwargs["auto_white_balance"])
+ self.camera_config.auto_white_balance = kwargs["auto_white_balance"]
+ settings_updated = True
+
+ if "color_temperature_preset" in kwargs and kwargs["color_temperature_preset"] is not None:
+ if not self.camera_config.auto_white_balance:
+ mvsdk.CameraSetPresetClrTemp(self.hCamera, kwargs["color_temperature_preset"])
+ self.camera_config.color_temperature_preset = kwargs["color_temperature_preset"]
+ settings_updated = True
+
+ # Update advanced settings
+ if "anti_flicker_enabled" in kwargs and kwargs["anti_flicker_enabled"] is not None:
+ mvsdk.CameraSetAntiFlick(self.hCamera, kwargs["anti_flicker_enabled"])
+ self.camera_config.anti_flicker_enabled = kwargs["anti_flicker_enabled"]
+ settings_updated = True
+
+ if "light_frequency" in kwargs and kwargs["light_frequency"] is not None:
+ mvsdk.CameraSetLightFrequency(self.hCamera, kwargs["light_frequency"])
+ self.camera_config.light_frequency = kwargs["light_frequency"]
+ settings_updated = True
+
+ # Update HDR settings (if supported)
+ if "hdr_enabled" in kwargs and kwargs["hdr_enabled"] is not None:
+ try:
+ mvsdk.CameraSetHDR(self.hCamera, 1 if kwargs["hdr_enabled"] else 0)
+ self.camera_config.hdr_enabled = kwargs["hdr_enabled"]
+ settings_updated = True
+ except AttributeError:
+ self.logger.warning("HDR functions not available in this SDK version")
+
+ if "hdr_gain_mode" in kwargs and kwargs["hdr_gain_mode"] is not None:
+ try:
+ if self.camera_config.hdr_enabled:
+ mvsdk.CameraSetHDRGainMode(self.hCamera, kwargs["hdr_gain_mode"])
+ self.camera_config.hdr_gain_mode = kwargs["hdr_gain_mode"]
+ settings_updated = True
+ except AttributeError:
+ self.logger.warning("HDR gain mode functions not available in this SDK version")
+
+ if settings_updated:
+ updated_settings = [k for k, v in kwargs.items() if v is not None]
+ self.logger.info(f"Updated camera settings: {updated_settings}")
+
+ return settings_updated
+
+ except Exception as e:
+ self.logger.error(f"Error updating advanced camera settings: {e}")
+ return False
+
def start_recording(self, filename: str) -> bool:
"""Start video recording"""
with self._lock:
diff --git a/usda_vision_system/camera/streamer.py b/usda_vision_system/camera/streamer.py
new file mode 100644
index 0000000..6bfcadc
--- /dev/null
+++ b/usda_vision_system/camera/streamer.py
@@ -0,0 +1,320 @@
+"""
+Camera Streamer for the USDA Vision Camera System.
+
+This module provides live preview streaming from GigE cameras without blocking recording.
+It creates a separate camera connection for streaming that doesn't interfere with recording.
+"""
+
+import sys
+import os
+import threading
+import time
+import logging
+import cv2
+import numpy as np
+import contextlib
+from typing import Optional, Dict, Any, Generator
+from datetime import datetime
+import queue
+
+# Add camera SDK to path
+sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "camera_sdk"))
+import mvsdk
+
+from ..core.config import CameraConfig
+from ..core.state_manager import StateManager
+from ..core.events import EventSystem
+from .sdk_config import ensure_sdk_initialized
+
+
+@contextlib.contextmanager
+def suppress_camera_errors():
+ """Context manager to temporarily suppress camera SDK error output"""
+ # Save original file descriptors
+ original_stderr = os.dup(2)
+ original_stdout = os.dup(1)
+
+ try:
+ # Redirect stderr and stdout to devnull
+ devnull = os.open(os.devnull, os.O_WRONLY)
+ os.dup2(devnull, 2) # stderr
+ os.dup2(devnull, 1) # stdout (in case SDK uses stdout)
+ os.close(devnull)
+
+ yield
+
+ finally:
+ # Restore original file descriptors
+ os.dup2(original_stderr, 2)
+ os.dup2(original_stdout, 1)
+ os.close(original_stderr)
+ os.close(original_stdout)
+
+
+class CameraStreamer:
+ """Provides live preview streaming from cameras without blocking recording"""
+
+ def __init__(self, camera_config: CameraConfig, device_info: Any, state_manager: StateManager, event_system: EventSystem):
+ self.camera_config = camera_config
+ self.device_info = device_info
+ self.state_manager = state_manager
+ self.event_system = event_system
+ self.logger = logging.getLogger(f"{__name__}.{camera_config.name}")
+
+ # Camera handle and properties (separate from recorder)
+ self.hCamera: Optional[int] = None
+ self.cap = None
+ self.monoCamera = False
+ self.frame_buffer = None
+ self.frame_buffer_size = 0
+
+ # Streaming state
+ self.streaming = False
+ self._streaming_thread: Optional[threading.Thread] = None
+ self._stop_streaming_event = threading.Event()
+ self._frame_queue = queue.Queue(maxsize=5) # Buffer for latest frames
+ self._lock = threading.RLock()
+
+ # Stream settings (optimized for preview)
+ self.preview_fps = 10.0 # Lower FPS for preview to reduce load
+ self.preview_quality = 70 # JPEG quality for streaming
+
+ def start_streaming(self) -> bool:
+ """Start streaming preview frames"""
+ with self._lock:
+ if self.streaming:
+ self.logger.warning("Streaming already active")
+ return True
+
+ try:
+ # Initialize camera for streaming
+ if not self._initialize_camera():
+ return False
+
+ # Start streaming thread
+ self._stop_streaming_event.clear()
+ self._streaming_thread = threading.Thread(target=self._streaming_loop, daemon=True)
+ self._streaming_thread.start()
+
+ self.streaming = True
+ self.logger.info(f"Started streaming for camera: {self.camera_config.name}")
+ return True
+
+ except Exception as e:
+ self.logger.error(f"Error starting streaming: {e}")
+ self._cleanup_camera()
+ return False
+
+ def stop_streaming(self) -> bool:
+ """Stop streaming preview frames"""
+ with self._lock:
+ if not self.streaming:
+ return True
+
+ try:
+ # Signal streaming thread to stop
+ self._stop_streaming_event.set()
+
+ # Wait for thread to finish
+ if self._streaming_thread and self._streaming_thread.is_alive():
+ self._streaming_thread.join(timeout=5.0)
+
+ # Cleanup camera resources
+ self._cleanup_camera()
+
+ self.streaming = False
+ self.logger.info(f"Stopped streaming for camera: {self.camera_config.name}")
+ return True
+
+ except Exception as e:
+ self.logger.error(f"Error stopping streaming: {e}")
+ return False
+
+ def get_latest_frame(self) -> Optional[bytes]:
+ """Get the latest frame as JPEG bytes for streaming"""
+ try:
+ # Get latest frame from queue (non-blocking)
+ frame = self._frame_queue.get_nowait()
+
+ # Encode as JPEG
+ _, buffer = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, self.preview_quality])
+ return buffer.tobytes()
+
+ except queue.Empty:
+ return None
+ except Exception as e:
+ self.logger.error(f"Error getting latest frame: {e}")
+ return None
+
+ def get_frame_generator(self) -> Generator[bytes, None, None]:
+ """Generator for MJPEG streaming"""
+ while self.streaming:
+ frame_bytes = self.get_latest_frame()
+ if frame_bytes:
+ yield (b"--frame\r\n" b"Content-Type: image/jpeg\r\n\r\n" + frame_bytes + b"\r\n")
+ else:
+ time.sleep(0.1) # Wait a bit if no frame available
+
+ def _initialize_camera(self) -> bool:
+ """Initialize camera for streaming (separate from recording)"""
+ try:
+ self.logger.info(f"Initializing camera for streaming: {self.camera_config.name}")
+
+ # Ensure SDK is initialized
+ ensure_sdk_initialized()
+
+ # Check if device_info is valid
+ if self.device_info is None:
+ self.logger.error("No device info provided for camera initialization")
+ return False
+
+ # Initialize camera (suppress output to avoid MVCAMAPI error messages)
+ with suppress_camera_errors():
+ self.hCamera = mvsdk.CameraInit(self.device_info, -1, -1)
+ self.logger.info("Camera initialized successfully for streaming")
+
+ # Get camera capabilities
+ self.cap = mvsdk.CameraGetCapability(self.hCamera)
+
+ # Determine if camera is monochrome
+ self.monoCamera = self.cap.sIspCapacity.bMonoSensor != 0
+
+ # Set output format based on camera type and bit depth
+ if self.monoCamera:
+ mvsdk.CameraSetIspOutFormat(self.hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
+ else:
+ mvsdk.CameraSetIspOutFormat(self.hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
+
+ # Configure camera settings for streaming (optimized for preview)
+ self._configure_streaming_settings()
+
+ # Allocate frame buffer
+ bytes_per_pixel = 1 if self.monoCamera else 3
+ self.frame_buffer_size = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax * bytes_per_pixel
+ self.frame_buffer = mvsdk.CameraAlignMalloc(self.frame_buffer_size, 16)
+
+ # Start camera
+ mvsdk.CameraPlay(self.hCamera)
+ self.logger.info("Camera started successfully for streaming")
+
+ return True
+
+ except Exception as e:
+ self.logger.error(f"Error initializing camera for streaming: {e}")
+ self._cleanup_camera()
+ return False
+
+ def _configure_streaming_settings(self):
+ """Configure camera settings optimized for streaming"""
+ try:
+ # Set trigger mode to free run for continuous streaming
+ mvsdk.CameraSetTriggerMode(self.hCamera, 0)
+
+ # Set exposure (use a reasonable default for preview)
+ exposure_us = int(self.camera_config.exposure_ms * 1000)
+ mvsdk.CameraSetExposureTime(self.hCamera, exposure_us)
+
+ # Set gain
+ mvsdk.CameraSetAnalogGain(self.hCamera, int(self.camera_config.gain))
+
+ # Set frame rate for streaming (lower than recording)
+ if hasattr(mvsdk, "CameraSetFrameSpeed"):
+ mvsdk.CameraSetFrameSpeed(self.hCamera, int(self.preview_fps))
+
+ self.logger.info(f"Streaming settings configured: exposure={self.camera_config.exposure_ms}ms, gain={self.camera_config.gain}, fps={self.preview_fps}")
+
+ except Exception as e:
+ self.logger.warning(f"Could not configure some streaming settings: {e}")
+
+ def _streaming_loop(self):
+ """Main streaming loop that captures frames continuously"""
+ self.logger.info("Starting streaming loop")
+
+ try:
+ while not self._stop_streaming_event.is_set():
+ try:
+ # Capture frame with timeout
+ pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 200) # 200ms timeout
+
+ # Process frame
+ mvsdk.CameraImageProcess(self.hCamera, pRawData, self.frame_buffer, FrameHead)
+
+ # Convert to OpenCV format
+ frame = self._convert_frame_to_opencv(FrameHead)
+
+ if frame is not None:
+ # Add frame to queue (replace oldest if queue is full)
+ try:
+ self._frame_queue.put_nowait(frame)
+ except queue.Full:
+ # Remove oldest frame and add new one
+ try:
+ self._frame_queue.get_nowait()
+ self._frame_queue.put_nowait(frame)
+ except queue.Empty:
+ pass
+
+ # Release buffer
+ mvsdk.CameraReleaseImageBuffer(self.hCamera, pRawData)
+
+ # Control frame rate
+ time.sleep(1.0 / self.preview_fps)
+
+ except Exception as e:
+ if not self._stop_streaming_event.is_set():
+ self.logger.error(f"Error in streaming loop: {e}")
+ time.sleep(0.1) # Brief pause before retrying
+
+ except Exception as e:
+ self.logger.error(f"Fatal error in streaming loop: {e}")
+ finally:
+ self.logger.info("Streaming loop ended")
+
+ def _convert_frame_to_opencv(self, FrameHead) -> Optional[np.ndarray]:
+ """Convert camera frame to OpenCV format"""
+ try:
+ # Convert the frame buffer memory address to a proper buffer
+ # that numpy can work with using mvsdk.c_ubyte
+ frame_data_buffer = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(self.frame_buffer)
+
+ if self.monoCamera:
+ # Monochrome camera
+ frame_data = np.frombuffer(frame_data_buffer, dtype=np.uint8)
+ frame = frame_data.reshape((FrameHead.iHeight, FrameHead.iWidth))
+ # Convert to 3-channel for consistency
+ frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
+ else:
+ # Color camera (BGR format)
+ frame_data = np.frombuffer(frame_data_buffer, dtype=np.uint8)
+ frame = frame_data.reshape((FrameHead.iHeight, FrameHead.iWidth, 3))
+
+ return frame
+
+ except Exception as e:
+ self.logger.error(f"Error converting frame: {e}")
+ return None
+
+ def _cleanup_camera(self):
+ """Clean up camera resources"""
+ try:
+ if self.frame_buffer:
+ mvsdk.CameraAlignFree(self.frame_buffer)
+ self.frame_buffer = None
+
+ if self.hCamera is not None:
+ mvsdk.CameraUnInit(self.hCamera)
+ self.hCamera = None
+
+ self.logger.info("Camera resources cleaned up for streaming")
+
+ except Exception as e:
+ self.logger.error(f"Error cleaning up camera resources: {e}")
+
+ def is_streaming(self) -> bool:
+ """Check if streaming is active"""
+ return self.streaming
+
+ def __del__(self):
+ """Destructor to ensure cleanup"""
+ if self.streaming:
+ self.stop_streaming()