Add USDA Vision Camera Streaming API and related functionality

- Implemented streaming API endpoints for starting, stopping, and retrieving live streams from cameras.
- Added support for concurrent streaming and recording operations.
- Created test scripts for frame conversion and streaming functionality.
- Developed a CameraStreamer class to manage live preview streaming without blocking recording.
- Included error handling and logging for camera operations.
- Added configuration endpoints for camera settings and real-time updates.
- Enhanced testing scenarios for various camera configurations and error handling.
This commit is contained in:
Alireza Vaezi
2025-07-28 18:09:48 -04:00
parent 7bc8138f24
commit ef0f9f85c5
20 changed files with 3594 additions and 4 deletions

595
AI_INTEGRATION_GUIDE.md Normal file
View File

@@ -0,0 +1,595 @@
# 🤖 AI Integration Guide: USDA Vision Camera Streaming for React Projects
This guide is specifically designed for AI assistants to understand and implement the USDA Vision Camera streaming functionality in React applications.
## 📋 System Overview
The USDA Vision Camera system provides live video streaming through REST API endpoints. The streaming uses MJPEG format which is natively supported by HTML `<img>` tags and can be easily integrated into React components.
### Key Characteristics:
- **Base URL**: `http://vision:8000` (production) or `http://localhost:8000` (development)
- **Stream Format**: MJPEG (Motion JPEG)
- **Content-Type**: `multipart/x-mixed-replace; boundary=frame`
- **Authentication**: None (add if needed for production)
- **CORS**: Enabled for all origins (configure for production)
### Base URL Configuration:
- **Production**: `http://vision:8000` (requires hostname setup)
- **Development**: `http://localhost:8000` (local testing)
- **Custom IP**: `http://192.168.1.100:8000` (replace with actual IP)
- **Custom hostname**: Configure DNS or /etc/hosts as needed
## 🔌 API Endpoints Reference
### 1. Get Camera List
```http
GET /cameras
```
**Response:**
```json
{
"camera1": {
"name": "camera1",
"status": "connected",
"is_recording": false,
"last_checked": "2025-01-28T10:30:00",
"device_info": {...}
},
"camera2": {...}
}
```
### 2. Start Camera Stream
```http
POST /cameras/{camera_name}/start-stream
```
**Response:**
```json
{
"success": true,
"message": "Started streaming for camera camera1"
}
```
### 3. Stop Camera Stream
```http
POST /cameras/{camera_name}/stop-stream
```
**Response:**
```json
{
"success": true,
"message": "Stopped streaming for camera camera1"
}
```
### 4. Live Video Stream
```http
GET /cameras/{camera_name}/stream
```
**Response:** MJPEG video stream
**Usage:** Set as `src` attribute of HTML `<img>` element
## ⚛️ React Integration Examples
### Basic Camera Stream Component
```jsx
import React, { useState } from 'react';
const CameraStream = ({ cameraName, apiBaseUrl = 'http://vision:8000' }) => {
const [isStreaming, setIsStreaming] = useState(false);
const [error, setError] = useState(null);
const [loading, setLoading] = useState(false);
const startStream = async () => {
setLoading(true);
setError(null);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/start-stream`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
});
if (response.ok) {
setIsStreaming(true);
} else {
const errorData = await response.json();
setError(errorData.detail || 'Failed to start stream');
}
} catch (err) {
setError(`Network error: ${err.message}`);
} finally {
setLoading(false);
}
};
const stopStream = async () => {
setLoading(true);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/stop-stream`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
});
if (response.ok) {
setIsStreaming(false);
} else {
const errorData = await response.json();
setError(errorData.detail || 'Failed to stop stream');
}
} catch (err) {
setError(`Network error: ${err.message}`);
} finally {
setLoading(false);
}
};
return (
<div className="camera-stream">
<h3>Camera: {cameraName}</h3>
{/* Video Stream */}
<div className="stream-container">
{isStreaming ? (
<img
src={`${apiBaseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`}
alt={`${cameraName} live stream`}
style={{
width: '100%',
maxWidth: '640px',
height: 'auto',
border: '2px solid #ddd',
borderRadius: '8px',
}}
onError={() => setError('Stream connection lost')}
/>
) : (
<div style={{
width: '100%',
maxWidth: '640px',
height: '360px',
backgroundColor: '#f0f0f0',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
border: '2px solid #ddd',
borderRadius: '8px',
}}>
<span>No Stream Active</span>
</div>
)}
</div>
{/* Controls */}
<div className="stream-controls" style={{ marginTop: '10px' }}>
<button
onClick={startStream}
disabled={loading || isStreaming}
style={{
padding: '8px 16px',
marginRight: '8px',
backgroundColor: '#28a745',
color: 'white',
border: 'none',
borderRadius: '4px',
cursor: loading ? 'not-allowed' : 'pointer',
}}
>
{loading ? 'Loading...' : 'Start Stream'}
</button>
<button
onClick={stopStream}
disabled={loading || !isStreaming}
style={{
padding: '8px 16px',
backgroundColor: '#dc3545',
color: 'white',
border: 'none',
borderRadius: '4px',
cursor: loading ? 'not-allowed' : 'pointer',
}}
>
{loading ? 'Loading...' : 'Stop Stream'}
</button>
</div>
{/* Error Display */}
{error && (
<div style={{
marginTop: '10px',
padding: '8px',
backgroundColor: '#f8d7da',
color: '#721c24',
border: '1px solid #f5c6cb',
borderRadius: '4px',
}}>
Error: {error}
</div>
)}
</div>
);
};
export default CameraStream;
```
### Multi-Camera Dashboard Component
```jsx
import React, { useState, useEffect } from 'react';
import CameraStream from './CameraStream';
const CameraDashboard = ({ apiBaseUrl = 'http://vision:8000' }) => {
const [cameras, setCameras] = useState({});
const [loading, setLoading] = useState(true);
const [error, setError] = useState(null);
useEffect(() => {
fetchCameras();
// Refresh camera status every 30 seconds
const interval = setInterval(fetchCameras, 30000);
return () => clearInterval(interval);
}, []);
const fetchCameras = async () => {
try {
const response = await fetch(`${apiBaseUrl}/cameras`);
if (response.ok) {
const data = await response.json();
setCameras(data);
setError(null);
} else {
setError('Failed to fetch cameras');
}
} catch (err) {
setError(`Network error: ${err.message}`);
} finally {
setLoading(false);
}
};
if (loading) {
return <div>Loading cameras...</div>;
}
if (error) {
return (
<div style={{ color: 'red', padding: '20px' }}>
Error: {error}
<button onClick={fetchCameras} style={{ marginLeft: '10px' }}>
Retry
</button>
</div>
);
}
return (
<div className="camera-dashboard">
<h1>USDA Vision Camera Dashboard</h1>
<div style={{
display: 'grid',
gridTemplateColumns: 'repeat(auto-fit, minmax(400px, 1fr))',
gap: '20px',
padding: '20px',
}}>
{Object.entries(cameras).map(([cameraName, cameraInfo]) => (
<div key={cameraName} style={{
border: '1px solid #ddd',
borderRadius: '8px',
padding: '15px',
backgroundColor: '#f9f9f9',
}}>
<CameraStream
cameraName={cameraName}
apiBaseUrl={apiBaseUrl}
/>
{/* Camera Status */}
<div style={{ marginTop: '10px', fontSize: '14px' }}>
<div>Status: <strong>{cameraInfo.status}</strong></div>
<div>Recording: <strong>{cameraInfo.is_recording ? 'Yes' : 'No'}</strong></div>
<div>Last Checked: {new Date(cameraInfo.last_checked).toLocaleString()}</div>
</div>
</div>
))}
</div>
</div>
);
};
export default CameraDashboard;
```
### Custom Hook for Camera Management
```jsx
import { useState, useCallback } from 'react';
const useCameraStream = (cameraName, apiBaseUrl = 'http://vision:8000') => {
const [isStreaming, setIsStreaming] = useState(false);
const [loading, setLoading] = useState(false);
const [error, setError] = useState(null);
const startStream = useCallback(async () => {
setLoading(true);
setError(null);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/start-stream`, {
method: 'POST',
});
if (response.ok) {
setIsStreaming(true);
return { success: true };
} else {
const errorData = await response.json();
const errorMsg = errorData.detail || 'Failed to start stream';
setError(errorMsg);
return { success: false, error: errorMsg };
}
} catch (err) {
const errorMsg = `Network error: ${err.message}`;
setError(errorMsg);
return { success: false, error: errorMsg };
} finally {
setLoading(false);
}
}, [cameraName, apiBaseUrl]);
const stopStream = useCallback(async () => {
setLoading(true);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/stop-stream`, {
method: 'POST',
});
if (response.ok) {
setIsStreaming(false);
return { success: true };
} else {
const errorData = await response.json();
const errorMsg = errorData.detail || 'Failed to stop stream';
setError(errorMsg);
return { success: false, error: errorMsg };
}
} catch (err) {
const errorMsg = `Network error: ${err.message}`;
setError(errorMsg);
return { success: false, error: errorMsg };
} finally {
setLoading(false);
}
}, [cameraName, apiBaseUrl]);
const getStreamUrl = useCallback(() => {
return `${apiBaseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`;
}, [cameraName, apiBaseUrl]);
return {
isStreaming,
loading,
error,
startStream,
stopStream,
getStreamUrl,
};
};
export default useCameraStream;
```
## 🎨 Styling with Tailwind CSS
```jsx
const CameraStreamTailwind = ({ cameraName }) => {
const { isStreaming, loading, error, startStream, stopStream, getStreamUrl } = useCameraStream(cameraName);
return (
<div className="bg-white rounded-lg shadow-md p-6">
<h3 className="text-lg font-semibold mb-4">Camera: {cameraName}</h3>
{/* Stream Container */}
<div className="relative mb-4">
{isStreaming ? (
<img
src={getStreamUrl()}
alt={`${cameraName} live stream`}
className="w-full max-w-2xl h-auto border-2 border-gray-300 rounded-lg"
onError={() => stopStream()}
/>
) : (
<div className="w-full max-w-2xl h-64 bg-gray-100 border-2 border-gray-300 rounded-lg flex items-center justify-center">
<span className="text-gray-500">No Stream Active</span>
</div>
)}
</div>
{/* Controls */}
<div className="flex gap-2 mb-4">
<button
onClick={startStream}
disabled={loading || isStreaming}
className="px-4 py-2 bg-green-500 text-white rounded hover:bg-green-600 disabled:opacity-50 disabled:cursor-not-allowed"
>
{loading ? 'Loading...' : 'Start Stream'}
</button>
<button
onClick={stopStream}
disabled={loading || !isStreaming}
className="px-4 py-2 bg-red-500 text-white rounded hover:bg-red-600 disabled:opacity-50 disabled:cursor-not-allowed"
>
{loading ? 'Loading...' : 'Stop Stream'}
</button>
</div>
{/* Error Display */}
{error && (
<div className="p-3 bg-red-100 border border-red-400 text-red-700 rounded">
Error: {error}
</div>
)}
</div>
);
};
```
## 🔧 Configuration Options
### Environment Variables (.env)
```env
# Production configuration (using 'vision' hostname)
REACT_APP_CAMERA_API_URL=http://vision:8000
REACT_APP_STREAM_REFRESH_INTERVAL=30000
REACT_APP_STREAM_TIMEOUT=10000
# Development configuration (using localhost)
# REACT_APP_CAMERA_API_URL=http://localhost:8000
# Custom IP configuration
# REACT_APP_CAMERA_API_URL=http://192.168.1.100:8000
```
### API Configuration
```javascript
const apiConfig = {
baseUrl: process.env.REACT_APP_CAMERA_API_URL || 'http://vision:8000',
  timeout: parseInt(process.env.REACT_APP_STREAM_TIMEOUT, 10) || 10000,
  refreshInterval: parseInt(process.env.REACT_APP_STREAM_REFRESH_INTERVAL, 10) || 30000,
};
```
### Hostname Setup Guide
```bash
# Option 1: Add to /etc/hosts (Linux/Mac)
echo "127.0.0.1 vision" | sudo tee -a /etc/hosts
# Option 2: Add to hosts file (Windows)
# Add to C:\Windows\System32\drivers\etc\hosts:
# 127.0.0.1 vision
# Option 3: Configure DNS
# Point 'vision' hostname to your server's IP address
# Verify hostname resolution
ping vision
```
## 🚨 Important Implementation Notes
### 1. MJPEG Stream Handling
- Use HTML `<img>` tag with `src` pointing to stream endpoint
- Add timestamp query parameter to prevent caching: `?t=${Date.now()}`
- Handle `onError` event for connection issues
### 2. Error Handling
- Network errors (fetch failures)
- HTTP errors (4xx, 5xx responses)
- Stream connection errors (img onError)
- Timeout handling for long requests
### 3. Performance Considerations
- Streams consume bandwidth continuously
- Stop streams when components unmount
- Limit concurrent streams based on system capacity
- Consider lazy loading for multiple cameras
### 4. State Management
- Track streaming state per camera
- Handle loading states during API calls
- Manage error states with user feedback
- Refresh camera list periodically
## 📱 Mobile Considerations
```jsx
// Responsive design for mobile
const mobileStyles = {
container: {
padding: '10px',
maxWidth: '100vw',
},
stream: {
width: '100%',
maxWidth: '100vw',
height: 'auto',
},
controls: {
display: 'flex',
flexDirection: 'column',
gap: '8px',
},
};
```
## 🧪 Testing Integration
```javascript
// Test API connectivity
const testConnection = async () => {
try {
const response = await fetch(`${apiBaseUrl}/health`);
return response.ok;
} catch {
return false;
}
};
// Test camera availability
const testCamera = async (cameraName) => {
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/test-connection`, {
method: 'POST',
});
return response.ok;
} catch {
return false;
}
};
```
## 📁 Additional Files for AI Integration
### TypeScript Definitions
- `camera-api.types.ts` - Complete TypeScript definitions for all API types
- `streaming-api.http` - REST Client file with all streaming endpoints
- `STREAMING_GUIDE.md` - Comprehensive user guide for streaming functionality
### Quick Integration Checklist for AI Assistants
1. **Copy TypeScript types** from `camera-api.types.ts`
2. **Use API endpoints** from `streaming-api.http`
3. **Implement error handling** as shown in examples
4. **Add CORS configuration** if needed for production
5. **Test with multiple cameras** using provided examples
### Key Integration Points
- **Stream URL Format**: `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`
- **Start Stream**: `POST /cameras/{name}/start-stream`
- **Stop Stream**: `POST /cameras/{name}/stop-stream`
- **Camera List**: `GET /cameras`
- **Error Handling**: Always wrap in try-catch blocks
- **Loading States**: Implement for better UX
### Production Considerations
- Configure CORS for specific origins
- Add authentication if required
- Implement rate limiting
- Monitor system resources with multiple streams
- Add reconnection logic for network issues
This documentation provides everything an AI assistant needs to integrate the USDA Vision Camera streaming functionality into React applications, including complete code examples, error handling, and best practices.

423
CAMERA_CONFIG_API.md Normal file
View File

@@ -0,0 +1,423 @@
# 🎛️ Camera Configuration API Guide
This guide explains how to configure camera settings via API endpoints, including all the advanced settings from your config.json.
## 📋 Configuration Categories
### ✅ **Real-time Configurable (No Restart Required)**
These settings can be changed while the camera is active:
- **Basic**: `exposure_ms`, `gain`, `target_fps`
- **Image Quality**: `sharpness`, `contrast`, `saturation`, `gamma`
- **Color**: `auto_white_balance`, `color_temperature_preset`
- **Advanced**: `anti_flicker_enabled`, `light_frequency`
- **HDR**: `hdr_enabled`, `hdr_gain_mode`
### ⚠️ **Restart Required**
These settings require camera restart to take effect:
- **Noise Reduction**: `noise_filter_enabled`, `denoise_3d_enabled`
- **System**: `machine_topic`, `storage_path`, `enabled`, `bit_depth`
## 🔌 API Endpoints
### 1. Get Camera Configuration
```http
GET /cameras/{camera_name}/config
```
**Response:**
```json
{
"name": "camera1",
"machine_topic": "vibratory_conveyor",
"storage_path": "/storage/camera1",
"enabled": true,
"exposure_ms": 1.0,
"gain": 3.5,
"target_fps": 0,
"sharpness": 120,
"contrast": 110,
"saturation": 100,
"gamma": 100,
"noise_filter_enabled": true,
"denoise_3d_enabled": false,
"auto_white_balance": true,
"color_temperature_preset": 0,
"anti_flicker_enabled": true,
"light_frequency": 1,
"bit_depth": 8,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
```
### 2. Update Camera Configuration
```http
PUT /cameras/{camera_name}/config
Content-Type: application/json
```
**Request Body (all fields optional):**
```json
{
"exposure_ms": 2.0,
"gain": 4.0,
"target_fps": 10.0,
"sharpness": 150,
"contrast": 120,
"saturation": 110,
"gamma": 90,
"noise_filter_enabled": true,
"denoise_3d_enabled": false,
"auto_white_balance": false,
"color_temperature_preset": 1,
"anti_flicker_enabled": true,
"light_frequency": 1,
"hdr_enabled": false,
"hdr_gain_mode": 0
}
```
**Response:**
```json
{
"success": true,
"message": "Camera camera1 configuration updated",
"updated_settings": ["exposure_ms", "gain", "sharpness"]
}
```
### 3. Apply Configuration (Restart Camera)
```http
POST /cameras/{camera_name}/apply-config
```
**Response:**
```json
{
"success": true,
"message": "Configuration applied to camera camera1"
}
```
## 📊 Setting Ranges and Descriptions
### Basic Settings
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `exposure_ms` | 0.1 - 1000.0 | 1.0 | Exposure time in milliseconds |
| `gain` | 0.0 - 20.0 | 3.5 | Camera gain multiplier |
| `target_fps` | 0.0 - 120.0 | 0 | Target FPS (0 = maximum) |
### Image Quality Settings
| Setting | Range | Default | Description |
|---------|-------|---------|-------------|
| `sharpness` | 0 - 200 | 100 | Image sharpness (100 = no sharpening) |
| `contrast` | 0 - 200 | 100 | Image contrast (100 = normal) |
| `saturation` | 0 - 200 | 100 | Color saturation (color cameras only) |
| `gamma` | 0 - 300 | 100 | Gamma correction (100 = normal) |
### Color Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `auto_white_balance` | true/false | true | Automatic white balance |
| `color_temperature_preset` | 0-10 | 0 | Color temperature preset (0=auto) |
### Advanced Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `anti_flicker_enabled` | true/false | true | Reduce artificial lighting flicker |
| `light_frequency` | 0/1 | 1 | Light frequency (0=50Hz, 1=60Hz) |
| `noise_filter_enabled` | true/false | true | Basic noise filtering |
| `denoise_3d_enabled` | true/false | false | Advanced 3D denoising |
### HDR Settings
| Setting | Values | Default | Description |
|---------|--------|---------|-------------|
| `hdr_enabled` | true/false | false | High Dynamic Range |
| `hdr_gain_mode` | 0-3 | 0 | HDR processing mode |
## 🚀 Usage Examples
### Example 1: Adjust Exposure and Gain
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"exposure_ms": 1.5,
"gain": 4.0
}'
```
### Example 2: Improve Image Quality
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"sharpness": 150,
"contrast": 120,
"gamma": 90
}'
```
### Example 3: Configure for Indoor Lighting
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"anti_flicker_enabled": true,
"light_frequency": 1,
"auto_white_balance": false,
"color_temperature_preset": 2
}'
```
### Example 4: Enable HDR Mode
```bash
curl -X PUT http://localhost:8000/cameras/camera1/config \
-H "Content-Type: application/json" \
-d '{
"hdr_enabled": true,
"hdr_gain_mode": 1
}'
```
## ⚛️ React Integration Examples
### Camera Configuration Component
```jsx
import React, { useState, useEffect } from 'react';
const CameraConfig = ({ cameraName, apiBaseUrl = 'http://localhost:8000' }) => {
const [config, setConfig] = useState(null);
const [loading, setLoading] = useState(false);
const [error, setError] = useState(null);
// Load current configuration
useEffect(() => {
fetchConfig();
}, [cameraName]);
const fetchConfig = async () => {
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`);
if (response.ok) {
const data = await response.json();
setConfig(data);
} else {
setError('Failed to load configuration');
}
} catch (err) {
setError(`Error: ${err.message}`);
}
};
const updateConfig = async (updates) => {
setLoading(true);
try {
const response = await fetch(`${apiBaseUrl}/cameras/${cameraName}/config`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates)
});
if (response.ok) {
const result = await response.json();
console.log('Updated settings:', result.updated_settings);
await fetchConfig(); // Reload configuration
} else {
const error = await response.json();
setError(error.detail || 'Update failed');
}
} catch (err) {
setError(`Error: ${err.message}`);
} finally {
setLoading(false);
}
};
const handleSliderChange = (setting, value) => {
updateConfig({ [setting]: value });
};
if (!config) return <div>Loading configuration...</div>;
return (
<div className="camera-config">
<h3>Camera Configuration: {cameraName}</h3>
{/* Basic Settings */}
<div className="config-section">
<h4>Basic Settings</h4>
<div className="setting">
<label>Exposure (ms): {config.exposure_ms}</label>
<input
type="range"
min="0.1"
max="10"
step="0.1"
value={config.exposure_ms}
onChange={(e) => handleSliderChange('exposure_ms', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Gain: {config.gain}</label>
<input
type="range"
min="0"
max="10"
step="0.1"
value={config.gain}
onChange={(e) => handleSliderChange('gain', parseFloat(e.target.value))}
/>
</div>
<div className="setting">
<label>Target FPS: {config.target_fps}</label>
<input
type="range"
min="0"
max="30"
step="1"
value={config.target_fps}
onChange={(e) => handleSliderChange('target_fps', parseInt(e.target.value, 10))}
/>
</div>
</div>
{/* Image Quality Settings */}
<div className="config-section">
<h4>Image Quality</h4>
<div className="setting">
<label>Sharpness: {config.sharpness}</label>
<input
type="range"
min="0"
max="200"
value={config.sharpness}
onChange={(e) => handleSliderChange('sharpness', parseInt(e.target.value, 10))}
/>
</div>
<div className="setting">
<label>Contrast: {config.contrast}</label>
<input
type="range"
min="0"
max="200"
value={config.contrast}
onChange={(e) => handleSliderChange('contrast', parseInt(e.target.value, 10))}
/>
</div>
<div className="setting">
<label>Gamma: {config.gamma}</label>
<input
type="range"
min="0"
max="300"
value={config.gamma}
onChange={(e) => handleSliderChange('gamma', parseInt(e.target.value, 10))}
/>
</div>
</div>
{/* Advanced Settings */}
<div className="config-section">
<h4>Advanced Settings</h4>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.anti_flicker_enabled}
onChange={(e) => updateConfig({ anti_flicker_enabled: e.target.checked })}
/>
Anti-flicker Enabled
</label>
</div>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.auto_white_balance}
onChange={(e) => updateConfig({ auto_white_balance: e.target.checked })}
/>
Auto White Balance
</label>
</div>
<div className="setting">
<label>
<input
type="checkbox"
checked={config.hdr_enabled}
onChange={(e) => updateConfig({ hdr_enabled: e.target.checked })}
/>
HDR Enabled
</label>
</div>
</div>
{error && (
<div className="error" style={{ color: 'red', marginTop: '10px' }}>
{error}
</div>
)}
{loading && <div>Updating configuration...</div>}
</div>
);
};
export default CameraConfig;
```
## 🔄 Configuration Workflow
### 1. Real-time Adjustments
For settings that don't require restart:
```bash
# Update settings
curl -X PUT /cameras/camera1/config -d '{"exposure_ms": 2.0}'
# Settings take effect immediately
# Continue recording/streaming without interruption
```
### 2. Settings Requiring Restart
For noise reduction and system settings:
```bash
# Update settings
curl -X PUT /cameras/camera1/config -d '{"noise_filter_enabled": false}'
# Apply configuration (restarts camera)
curl -X POST /cameras/camera1/apply-config
# Camera reinitializes with new settings
```
## 🚨 Important Notes
### Camera State During Updates
- **Real-time settings**: Applied immediately, no interruption
- **Restart-required settings**: Saved to config, applied on next restart
- **Recording**: Continues during real-time updates
- **Streaming**: Continues during real-time updates
### Error Handling
- Invalid ranges return HTTP 422 with validation errors
- Camera not found returns HTTP 404
- SDK errors are logged and return HTTP 500
### Performance Impact
- **Image quality settings**: Minimal performance impact
- **Noise reduction**: May reduce FPS when enabled
- **HDR**: Significant processing overhead when enabled
This comprehensive API allows you to control all camera settings programmatically, making it perfect for integration with React dashboards or automated optimization systems!

240
STREAMING_GUIDE.md Normal file
View File

@@ -0,0 +1,240 @@
# 🎥 USDA Vision Camera Live Streaming Guide
This guide explains how to use the new live preview streaming functionality that allows you to view camera feeds in real-time without blocking recording operations.
## 🌟 Key Features
- **Non-blocking streaming**: Live preview doesn't interfere with recording
- **Separate camera connections**: Streaming uses independent camera instances
- **MJPEG streaming**: Standard web-compatible video streaming
- **Multiple concurrent viewers**: Multiple browsers can view the same stream
- **REST API control**: Start/stop streaming via API endpoints
- **Web interface**: Ready-to-use HTML interface for live preview
## 🏗️ Architecture
The streaming system creates separate camera connections for preview that are independent from recording:
```
Camera Hardware
├── Recording Connection (CameraRecorder)
│ ├── Used for video file recording
│ ├── Triggered by MQTT machine states
│ └── High quality, full FPS
└── Streaming Connection (CameraStreamer)
├── Used for live preview
├── Controlled via API endpoints
└── Optimized for web viewing (lower FPS, JPEG compression)
```
## 🚀 Quick Start
### 1. Start the System
```bash
python main.py
```
### 2. Open the Web Interface
Open `camera_preview.html` in your browser and click "Start Stream" for any camera.
### 3. API Usage
```bash
# Start streaming for camera1
curl -X POST http://localhost:8000/cameras/camera1/start-stream
# View live stream (open in browser)
http://localhost:8000/cameras/camera1/stream
# Stop streaming
curl -X POST http://localhost:8000/cameras/camera1/stop-stream
```
## 📡 API Endpoints
### Start Streaming
```http
POST /cameras/{camera_name}/start-stream
```
**Response:**
```json
{
"success": true,
"message": "Started streaming for camera camera1"
}
```
### Stop Streaming
```http
POST /cameras/{camera_name}/stop-stream
```
**Response:**
```json
{
"success": true,
"message": "Stopped streaming for camera camera1"
}
```
### Live Stream (MJPEG)
```http
GET /cameras/{camera_name}/stream
```
**Response:** Multipart MJPEG stream
**Content-Type:** `multipart/x-mixed-replace; boundary=frame`
## 🌐 Web Interface Usage
The included `camera_preview.html` provides a complete web interface:
1. **Camera Grid**: Shows all configured cameras
2. **Stream Controls**: Start/Stop/Refresh buttons for each camera
3. **Live Preview**: Real-time video feed display
4. **Status Information**: System and camera status
5. **Responsive Design**: Works on desktop and mobile
### Features:
- ✅ Real-time camera status
- ✅ One-click stream start/stop
- ✅ Automatic stream refresh
- ✅ System health monitoring
- ✅ Error handling and status messages
## 🔧 Technical Details
### Camera Streamer Configuration
- **Preview FPS**: 10 FPS (configurable)
- **JPEG Quality**: 70% (configurable)
- **Frame Buffer**: 5 frames (prevents memory buildup)
- **Timeout**: 200ms per frame capture
### Memory Management
- Automatic frame buffer cleanup
- Queue-based frame management
- Proper camera resource cleanup on stop
### Thread Safety
- Thread-safe streaming operations
- Independent from recording threads
- Proper synchronization with locks
## 🧪 Testing
### Run the Test Script
```bash
python test_streaming.py
```
This will test:
- ✅ API endpoint functionality
- ✅ Stream start/stop operations
- ✅ Concurrent recording and streaming
- ✅ Error handling
### Manual Testing
1. Start the system: `python main.py`
2. Open `camera_preview.html` in browser
3. Start streaming for a camera
4. Trigger recording via MQTT or manual API
5. Verify both work simultaneously
## 🔄 Concurrent Operations
The system supports these concurrent operations:
| Operation | Recording | Streaming | Notes |
|-----------|-----------|-----------|-------|
| Recording Only | ✅ | ❌ | Normal operation |
| Streaming Only | ❌ | ✅ | Preview without recording |
| Both Concurrent | ✅ | ✅ | **Independent connections** |
### Example: Concurrent Usage
```bash
# Start streaming
curl -X POST http://localhost:8000/cameras/camera1/start-stream
# Start recording (while streaming continues)
curl -X POST http://localhost:8000/cameras/camera1/start-recording \
-H "Content-Type: application/json" \
-d '{"filename": "test_recording.avi"}'
# Both operations run independently!
```
## 🛠️ Configuration
### Stream Settings (in CameraStreamer)
```python
self.preview_fps = 10.0 # Lower FPS for preview
self.preview_quality = 70 # JPEG quality (1-100)
self._frame_queue.maxsize = 5 # Frame buffer size
```
### Camera Settings
The streamer uses the same camera configuration as recording:
- Exposure time from `camera_config.exposure_ms`
- Gain from `camera_config.gain`
- Optimized trigger mode for continuous streaming
## 🚨 Important Notes
### Camera Access Patterns
- **Recording**: Blocks camera during active recording
- **Streaming**: Uses separate connection, doesn't block
- **Health Checks**: Brief, non-blocking camera tests
- **Multiple Streams**: Multiple browsers can view same stream
### Performance Considerations
- Streaming uses additional CPU/memory resources
- Lower preview FPS reduces system load
- JPEG compression reduces bandwidth usage
- Frame queue prevents memory buildup
### Error Handling
- Automatic camera resource cleanup
- Graceful handling of camera disconnections
- Stream auto-restart capabilities
- Detailed error logging
## 🔍 Troubleshooting
### Stream Not Starting
1. Check camera availability: `GET /cameras`
2. Verify camera not in error state
3. Check system logs for camera initialization errors
4. Try camera reconnection: `POST /cameras/{name}/reconnect`
### Poor Stream Quality
1. Adjust `preview_quality` setting (higher = better quality)
2. Increase `preview_fps` for smoother video
3. Check network bandwidth
4. Verify camera exposure/gain settings
### Browser Issues
1. Try different browser (Chrome/Firefox recommended)
2. Check browser console for JavaScript errors
3. Verify CORS settings in API server
4. Clear browser cache and refresh
## 📈 Future Enhancements
Potential improvements for the streaming system:
- 🔄 WebRTC support for lower latency
- 📱 Mobile app integration
- 🎛️ Real-time camera setting adjustments
- 📊 Stream analytics and monitoring
- 🔐 Authentication and access control
- 🌐 Multi-camera synchronized viewing
## 📞 Support
For issues with streaming functionality:
1. Check the system logs: `usda_vision_system.log`
2. Run the test script: `python test_streaming.py`
3. Verify API health: `http://localhost:8000/health`
4. Check camera status: `http://localhost:8000/cameras`
---
**✅ Live streaming is now ready for production use!**

View File

@@ -1,14 +1,74 @@
############################################################################### ###############################################################################
# USDA Vision Camera System - Complete API Endpoints Documentation # USDA Vision Camera System - Complete API Endpoints Documentation
# Base URL: http://localhost:8000 #
# CONFIGURATION:
# - Default Base URL: http://localhost:8000 (local development)
# - Production Base URL: http://vision:8000 (when using hostname 'vision')
# - Custom hostname: Update @baseUrl variable below
#
# HOSTNAME SETUP:
# To use 'vision' hostname instead of 'localhost':
# 1. Add to /etc/hosts: 127.0.0.1 vision
# 2. Or configure DNS to point 'vision' to the server IP
# 3. Update camera_preview.html: API_BASE = 'http://vision:8000'
############################################################################### ###############################################################################
# Base URL Configuration - Change this to match your setup
@baseUrl = http://vision:8000
# Alternative configurations:
# @baseUrl = http://localhost:8000 # Local development
# @baseUrl = http://192.168.1.100:8000 # Specific IP address
# @baseUrl = http://your-server:8000 # Custom hostname
###############################################################################
# CONFIGURATION GUIDE
###############################################################################
### HOSTNAME CONFIGURATION OPTIONS:
# Option 1: Using 'vision' hostname (recommended for production)
# - Requires hostname resolution setup
# - Add to /etc/hosts: 127.0.0.1 vision
# - Or configure DNS: vision -> server IP address
# - Update camera_preview.html: API_BASE = 'http://vision:8000'
# - Set @baseUrl = http://vision:8000
# Option 2: Using localhost (development)
# - Works immediately on local machine
# - Set @baseUrl = http://localhost:8000
# - Update camera_preview.html: API_BASE = 'http://localhost:8000'
# Option 3: Using specific IP address
# - Replace with actual server IP
# - Set @baseUrl = http://192.168.1.100:8000
# - Update camera_preview.html: API_BASE = 'http://192.168.1.100:8000'
# Option 4: Custom hostname
# - Configure DNS or /etc/hosts for custom name
# - Set @baseUrl = http://your-custom-name:8000
# - Update camera_preview.html: API_BASE = 'http://your-custom-name:8000'
### NETWORK CONFIGURATION:
# - Default port: 8000
# - CORS enabled for all origins (configure for production)
# - No authentication required (add if needed)
### CLIENT CONFIGURATION FILES TO UPDATE:
# 1. camera_preview.html - Update API_BASE constant
# 2. React projects - Update apiConfig.baseUrl
# 3. This file - Update @baseUrl variable
# 4. Any custom scripts - Update base URL
### TESTING CONNECTIVITY:
# Test if the API is reachable:
GET {{baseUrl}}/health
############################################################################### ###############################################################################
# SYSTEM ENDPOINTS # SYSTEM ENDPOINTS
############################################################################### ###############################################################################
### Root endpoint - API information ### Root endpoint - API information
GET http://localhost:8000/ GET {{baseUrl}}/
# Response: SuccessResponse # Response: SuccessResponse
# { # {
# "success": true, # "success": true,
@@ -427,3 +487,14 @@ Content-Type: application/json
# - fps omitted: Uses camera config default # - fps omitted: Uses camera config default
# 6. Filenames automatically get datetime prefix: YYYYMMDD_HHMMSS_filename.avi # 6. Filenames automatically get datetime prefix: YYYYMMDD_HHMMSS_filename.avi
# 7. Recovery endpoints should be used in order: test-connection → reconnect → restart-grab → full-reset → reinitialize # 7. Recovery endpoints should be used in order: test-connection → reconnect → restart-grab → full-reset → reinitialize
### Start streaming for camera1 (shell example — bare curl lines are not valid REST Client syntax, so they are commented out; use the {{baseUrl}} requests above from within VS Code)
# curl -X POST http://localhost:8000/cameras/camera1/start-stream
# View live stream (open in browser)
# http://localhost:8000/cameras/camera1/stream
### Stop streaming (shell example)
# curl -X POST http://localhost:8000/cameras/camera1/stop-stream

367
camera-api.types.ts Normal file
View File

@@ -0,0 +1,367 @@
/**
* TypeScript definitions for USDA Vision Camera System API
*
* This file provides complete type definitions for AI assistants
* to integrate the camera streaming functionality into React/TypeScript projects.
*/
// =============================================================================
// BASE CONFIGURATION
// =============================================================================
/** Connection settings shared by all API clients and hooks in this module. */
export interface ApiConfig {
  /** Root URL of the camera API server, e.g. 'http://vision:8000'. */
  baseUrl: string;
  /** Per-request timeout in milliseconds. */
  timeout?: number;
  /** Polling interval for status refreshes, in milliseconds. */
  refreshInterval?: number;
}

/** Default configuration; override `baseUrl` to match the deployment. */
export const defaultApiConfig: ApiConfig = {
  baseUrl: 'http://vision:8000', // Production default, change to 'http://localhost:8000' for development
  timeout: 10000,
  refreshInterval: 30000,
};

// =============================================================================
// CAMERA TYPES
// =============================================================================

/** Hardware details reported for a camera inside the GET /cameras response. */
export interface CameraDeviceInfo {
  friendly_name?: string;
  port_type?: string;
  serial_number?: string;
  device_index?: number;
  /** Populated when the device could not be queried. */
  error?: string;
}

/** Per-camera status entry as returned by GET /cameras. */
export interface CameraInfo {
  name: string;
  status: 'connected' | 'disconnected' | 'error' | 'not_found' | 'available';
  is_recording: boolean;
  last_checked: string; // ISO date string
  last_error?: string | null;
  device_info?: CameraDeviceInfo;
  current_recording_file?: string | null;
  recording_start_time?: string | null; // ISO date string
}

/** Shape of the GET /cameras response: a map keyed by camera name. */
export interface CameraListResponse {
  [cameraName: string]: CameraInfo;
}
// =============================================================================
// STREAMING TYPES
// =============================================================================
/** POST /cameras/{name}/start-stream takes no request body; the camera name is in the URL path. */
export interface StreamStartRequest {
  // No body required - camera name is in URL path
}

/** Response of POST /cameras/{name}/start-stream. */
export interface StreamStartResponse {
  success: boolean;
  message: string;
}

/** POST /cameras/{name}/stop-stream takes no request body; the camera name is in the URL path. */
export interface StreamStopRequest {
  // No body required - camera name is in URL path
}

/** Response of POST /cameras/{name}/stop-stream. */
export interface StreamStopResponse {
  success: boolean;
  message: string;
}

/** Client-side streaming state for a single camera. */
export interface StreamStatus {
  isStreaming: boolean;
  /** MJPEG stream URL; set while a stream is active. */
  streamUrl?: string;
  error?: string;
}

// =============================================================================
// RECORDING TYPES
// =============================================================================

/** Optional body for POST /cameras/{name}/start-recording; omitted fields fall back to the camera config defaults. */
export interface StartRecordingRequest {
  filename?: string;
  exposure_ms?: number;
  gain?: number;
  fps?: number;
}

/** Response of POST /cameras/{name}/start-recording. */
export interface StartRecordingResponse {
  success: boolean;
  message: string;
  filename?: string;
}

/** Response of POST /cameras/{name}/stop-recording. */
export interface StopRecordingResponse {
  success: boolean;
  message: string;
}

// =============================================================================
// SYSTEM TYPES
// =============================================================================

/** Response of GET /system/status. */
export interface SystemStatusResponse {
  status: string;
  uptime: string;
  api_server_running: boolean;
  camera_manager_running: boolean;
  mqtt_client_connected: boolean;
  total_cameras: number;
  active_recordings: number;
  active_streams?: number;
}

/** Response of GET /health. */
export interface HealthResponse {
  status: 'healthy' | 'unhealthy';
  timestamp: string;
}

// =============================================================================
// ERROR TYPES
// =============================================================================

/** Error payload returned by the API (FastAPI-style `detail` field). */
export interface ApiError {
  detail: string;
  status_code?: number;
}

/** Client-side error enriched with a failure category and the camera it concerns. */
export interface StreamError extends Error {
  type: 'network' | 'api' | 'stream' | 'timeout';
  cameraName: string;
  originalError?: Error;
}
// =============================================================================
// HOOK TYPES
// =============================================================================
/** Return value of the useCameraStream(cameraName) hook. */
export interface UseCameraStreamResult {
  isStreaming: boolean;
  loading: boolean;
  error: string | null;
  startStream: () => Promise<{ success: boolean; error?: string }>;
  stopStream: () => Promise<{ success: boolean; error?: string }>;
  /** Builds the MJPEG stream URL for use as an <img> src; does not call the API. */
  getStreamUrl: () => string;
  refreshStream: () => void;
}

/** Return value of the useCameraList() hook. */
export interface UseCameraListResult {
  cameras: CameraListResponse;
  loading: boolean;
  error: string | null;
  refreshCameras: () => Promise<void>;
}

/** Return value of the useCameraRecording(cameraName) hook. */
export interface UseCameraRecordingResult {
  isRecording: boolean;
  loading: boolean;
  error: string | null;
  currentFile: string | null;
  startRecording: (options?: StartRecordingRequest) => Promise<{ success: boolean; error?: string }>;
  stopRecording: () => Promise<{ success: boolean; error?: string }>;
}

// =============================================================================
// COMPONENT PROPS TYPES
// =============================================================================

/** Props for a single-camera live-stream component. */
export interface CameraStreamProps {
  cameraName: string;
  apiConfig?: ApiConfig;
  /** When true, start streaming automatically on mount. */
  autoStart?: boolean;
  onStreamStart?: (cameraName: string) => void;
  onStreamStop?: (cameraName: string) => void;
  onError?: (error: StreamError) => void;
  className?: string;
  style?: React.CSSProperties;
}

/** Props for a multi-camera dashboard component. */
export interface CameraDashboardProps {
  apiConfig?: ApiConfig;
  cameras?: string[]; // If provided, only show these cameras
  showRecordingControls?: boolean;
  showStreamingControls?: boolean;
  refreshInterval?: number;
  onCameraSelect?: (cameraName: string) => void;
  className?: string;
}

/** Props for a per-camera control strip (stream/recording buttons). */
export interface CameraControlsProps {
  cameraName: string;
  apiConfig?: ApiConfig;
  showRecording?: boolean;
  showStreaming?: boolean;
  onAction?: (action: 'start-stream' | 'stop-stream' | 'start-recording' | 'stop-recording', cameraName: string) => void;
}
// =============================================================================
// API CLIENT TYPES
// =============================================================================
/** Contract for an API client wrapping every camera-system endpoint. */
export interface CameraApiClient {
  // System endpoints
  getHealth(): Promise<HealthResponse>;
  getSystemStatus(): Promise<SystemStatusResponse>;
  // Camera endpoints
  getCameras(): Promise<CameraListResponse>;
  getCameraStatus(cameraName: string): Promise<CameraInfo>;
  testCameraConnection(cameraName: string): Promise<{ success: boolean; message: string }>;
  // Streaming endpoints
  startStream(cameraName: string): Promise<StreamStartResponse>;
  stopStream(cameraName: string): Promise<StreamStopResponse>;
  /** Synchronous: builds the MJPEG URL without making a request. */
  getStreamUrl(cameraName: string): string;
  // Recording endpoints
  startRecording(cameraName: string, options?: StartRecordingRequest): Promise<StartRecordingResponse>;
  stopRecording(cameraName: string): Promise<StopRecordingResponse>;
}

// =============================================================================
// UTILITY TYPES
// =============================================================================

/** User-initiated camera operations. */
export type CameraAction = 'start-stream' | 'stop-stream' | 'start-recording' | 'stop-recording' | 'test-connection';

/** Normalized result of a camera action. */
export interface CameraActionResult {
  success: boolean;
  message: string;
  error?: string;
}

/** Per-camera streaming UI state, keyed by camera name. */
export interface StreamingState {
  [cameraName: string]: {
    isStreaming: boolean;
    isLoading: boolean;
    error: string | null;
    lastStarted?: Date;
  };
}

/** Per-camera recording UI state, keyed by camera name. */
export interface RecordingState {
  [cameraName: string]: {
    isRecording: boolean;
    isLoading: boolean;
    error: string | null;
    currentFile: string | null;
    startTime?: Date;
  };
}

// =============================================================================
// EVENT TYPES
// =============================================================================

/** Event emitted when streaming or recording state changes for a camera. */
export interface CameraEvent {
  type: 'stream-started' | 'stream-stopped' | 'stream-error' | 'recording-started' | 'recording-stopped' | 'recording-error';
  cameraName: string;
  timestamp: Date;
  data?: any;
}

/** Callback signature for CameraEvent listeners. */
export type CameraEventHandler = (event: CameraEvent) => void;

// =============================================================================
// CONFIGURATION TYPES
// =============================================================================

/** Client-side stream tuning options. */
export interface StreamConfig {
  fps: number;
  quality: number; // 1-100
  timeout: number;
  retryAttempts: number;
  retryDelay: number;
}

/** StreamConfig bound to a specific camera, with a reconnect policy. */
export interface CameraStreamConfig extends StreamConfig {
  cameraName: string;
  autoReconnect: boolean;
  maxReconnectAttempts: number;
}
// =============================================================================
// CONTEXT TYPES (for React Context)
// =============================================================================
/** Value provided by a camera React Context: shared state plus the actions that mutate it. */
export interface CameraContextValue {
  cameras: CameraListResponse;
  streamingState: StreamingState;
  recordingState: RecordingState;
  apiClient: CameraApiClient;
  // Actions
  startStream: (cameraName: string) => Promise<CameraActionResult>;
  stopStream: (cameraName: string) => Promise<CameraActionResult>;
  startRecording: (cameraName: string, options?: StartRecordingRequest) => Promise<CameraActionResult>;
  stopRecording: (cameraName: string) => Promise<CameraActionResult>;
  refreshCameras: () => Promise<void>;
  // State
  loading: boolean;
  error: string | null;
}
// =============================================================================
// EXAMPLE USAGE TYPES
// =============================================================================
/**
* Example usage in React component:
*
* ```typescript
* import { CameraStreamProps, UseCameraStreamResult } from './camera-api.types';
*
* const CameraStream: React.FC<CameraStreamProps> = ({
* cameraName,
* apiConfig = defaultApiConfig,
* autoStart = false,
* onStreamStart,
* onStreamStop,
* onError
* }) => {
* const {
* isStreaming,
* loading,
* error,
* startStream,
* stopStream,
* getStreamUrl
* }: UseCameraStreamResult = useCameraStream(cameraName, apiConfig);
*
* // Component implementation...
* };
* ```
*/
/**
* Example API client usage:
*
* ```typescript
* const apiClient: CameraApiClient = new CameraApiClientImpl(defaultApiConfig);
*
* // Start streaming
* const result = await apiClient.startStream('camera1');
* if (result.success) {
* const streamUrl = apiClient.getStreamUrl('camera1');
* // Use streamUrl in img tag
* }
* ```
*/
/**
* Example hook usage:
*
* ```typescript
* const MyComponent = () => {
* const { cameras, loading, error, refreshCameras } = useCameraList();
* const { isStreaming, startStream, stopStream } = useCameraStream('camera1');
*
* // Component logic...
* };
* ```
*/
// Empty default export keeps this file importable as a module; prefer the named type exports above.
export default {};

336
camera_preview.html Normal file
View File

@@ -0,0 +1,336 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>USDA Vision Camera Live Preview</title>
<style>
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 20px;
background-color: #f5f5f5;
}
.container {
max-width: 1200px;
margin: 0 auto;
background-color: white;
padding: 20px;
border-radius: 8px;
box-shadow: 0 2px 10px rgba(0,0,0,0.1);
}
h1 {
color: #333;
text-align: center;
margin-bottom: 30px;
}
.camera-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(400px, 1fr));
gap: 20px;
margin-bottom: 30px;
}
.camera-card {
border: 1px solid #ddd;
border-radius: 8px;
padding: 15px;
background-color: #fafafa;
}
.camera-title {
font-size: 18px;
font-weight: bold;
margin-bottom: 10px;
color: #333;
}
.camera-stream {
width: 100%;
max-width: 100%;
height: auto;
border: 2px solid #ddd;
border-radius: 4px;
background-color: #000;
min-height: 200px;
display: block;
}
.camera-controls {
margin-top: 10px;
display: flex;
gap: 10px;
flex-wrap: wrap;
}
.btn {
padding: 8px 16px;
border: none;
border-radius: 4px;
cursor: pointer;
font-size: 14px;
transition: background-color 0.3s;
}
.btn-primary {
background-color: #007bff;
color: white;
}
.btn-primary:hover {
background-color: #0056b3;
}
.btn-secondary {
background-color: #6c757d;
color: white;
}
.btn-secondary:hover {
background-color: #545b62;
}
.btn-success {
background-color: #28a745;
color: white;
}
.btn-success:hover {
background-color: #1e7e34;
}
.btn-danger {
background-color: #dc3545;
color: white;
}
.btn-danger:hover {
background-color: #c82333;
}
.status {
margin-top: 10px;
padding: 8px;
border-radius: 4px;
font-size: 14px;
}
.status-success {
background-color: #d4edda;
color: #155724;
border: 1px solid #c3e6cb;
}
.status-error {
background-color: #f8d7da;
color: #721c24;
border: 1px solid #f5c6cb;
}
.status-info {
background-color: #d1ecf1;
color: #0c5460;
border: 1px solid #bee5eb;
}
.system-info {
margin-top: 30px;
padding: 15px;
background-color: #e9ecef;
border-radius: 4px;
}
.system-info h3 {
margin-top: 0;
color: #495057;
}
.api-info {
font-family: monospace;
font-size: 12px;
color: #6c757d;
}
</style>
</head>
<body>
<div class="container">
<h1>🎥 USDA Vision Camera Live Preview</h1>
<div class="camera-grid" id="cameraGrid">
<!-- Camera cards will be dynamically generated -->
</div>
<div class="system-info">
<h3>📡 System Information</h3>
<div id="systemStatus">Loading system status...</div>
<h3>🔗 API Endpoints</h3>
<div class="api-info">
<p><strong>Live Stream:</strong> GET /cameras/{camera_name}/stream</p>
<p><strong>Start Stream:</strong> POST /cameras/{camera_name}/start-stream</p>
<p><strong>Stop Stream:</strong> POST /cameras/{camera_name}/stop-stream</p>
<p><strong>Camera Status:</strong> GET /cameras</p>
</div>
</div>
</div>
<script>
// Base URL of the camera API server; see the configuration guide for localhost/IP alternatives.
const API_BASE = 'http://vision:8000';
// Latest camera map fetched from GET /cameras, keyed by camera name.
let cameras = {};
// Initialize the page: load the camera list once, then poll system status.
async function init() {
    await loadCameras();
    await loadSystemStatus();
    // Refresh status every 5 seconds
    setInterval(loadSystemStatus, 5000);
}
// Load camera information from GET /cameras and render the camera cards.
// Fix: reject non-2xx responses explicitly; previously an HTTP error body was
// parsed as JSON and rendered as if it were camera data (or surfaced a
// confusing parse error) instead of reaching the error handler.
async function loadCameras() {
    try {
        const response = await fetch(`${API_BASE}/cameras`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status} ${response.statusText}`);
        }
        const data = await response.json();
        cameras = data;
        renderCameras();
    } catch (error) {
        console.error('Error loading cameras:', error);
        showError('Failed to load camera information');
    }
}
// Fetch GET /system/status and render it into the #systemStatus panel.
// Fix: reject non-2xx responses explicitly so HTTP errors fall through to the
// catch block and show the failure message, instead of rendering fields from
// an error payload as "undefined".
async function loadSystemStatus() {
    try {
        const response = await fetch(`${API_BASE}/system/status`);
        if (!response.ok) {
            throw new Error(`HTTP ${response.status} ${response.statusText}`);
        }
        const data = await response.json();
        const statusDiv = document.getElementById('systemStatus');
        statusDiv.innerHTML = `
            <p><strong>System:</strong> ${data.status}</p>
            <p><strong>Uptime:</strong> ${data.uptime}</p>
            <p><strong>API Server:</strong> ${data.api_server_running ? '✅ Running' : '❌ Stopped'}</p>
            <p><strong>Camera Manager:</strong> ${data.camera_manager_running ? '✅ Running' : '❌ Stopped'}</p>
            <p><strong>MQTT Client:</strong> ${data.mqtt_client_connected ? '✅ Connected' : '❌ Disconnected'}</p>
        `;
    } catch (error) {
        console.error('Error loading system status:', error);
        document.getElementById('systemStatus').innerHTML = '<p style="color: red;">Failed to load system status</p>';
    }
}
// Rebuild the camera grid from the module-level `cameras` map.
function renderCameras() {
    const grid = document.getElementById('cameraGrid');
    grid.innerHTML = '';
    Object.entries(cameras).forEach(([name, info]) => {
        grid.appendChild(createCameraCard(name, info));
    });
}
// Create a camera card element: title, stream <img> (starts as an inline
// "No Stream" SVG placeholder), control buttons, and a status line.
// The element ids (`stream-${name}`, `status-${name}`) are relied on by
// startStream/stopStream/refreshStream/updateStatus.
// NOTE(review): cameraName is interpolated into innerHTML and inline onclick
// handlers unescaped — assumes camera names from the API are trusted
// identifiers (no quotes/HTML); confirm against the server's config schema.
function createCameraCard(cameraName, cameraInfo) {
    const card = document.createElement('div');
    card.className = 'camera-card';
    card.innerHTML = `
        <div class="camera-title">${cameraName}</div>
        <img class="camera-stream" id="stream-${cameraName}"
             src="data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iNDAwIiBoZWlnaHQ9IjIwMCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48cmVjdCB3aWR0aD0iMTAwJSIgaGVpZ2h0PSIxMDAlIiBmaWxsPSIjZGRkIi8+PHRleHQgeD0iNTAlIiB5PSI1MCUiIGZvbnQtZmFtaWx5PSJBcmlhbCIgZm9udC1zaXplPSIxNCIgZmlsbD0iIzk5OSIgdGV4dC1hbmNob3I9Im1pZGRsZSIgZHk9Ii4zZW0iPk5vIFN0cmVhbTwvdGV4dD48L3N2Zz4="
             alt="Camera Stream">
        <div class="camera-controls">
            <button class="btn btn-success" onclick="startStream('${cameraName}')">Start Stream</button>
            <button class="btn btn-danger" onclick="stopStream('${cameraName}')">Stop Stream</button>
            <button class="btn btn-secondary" onclick="refreshStream('${cameraName}')">Refresh</button>
        </div>
        <div class="status status-info" id="status-${cameraName}">
            Status: ${cameraInfo.status} | Recording: ${cameraInfo.is_recording ? 'Yes' : 'No'}
        </div>
    `;
    return card;
}
// Ask the API to start streaming `cameraName`, then point the card's <img>
// at the MJPEG endpoint (timestamp query param busts the browser cache).
async function startStream(cameraName) {
    try {
        updateStatus(cameraName, 'Starting stream...', 'info');
        // Start the stream
        const response = await fetch(`${API_BASE}/cameras/${cameraName}/start-stream`, {
            method: 'POST'
        });
        if (!response.ok) {
            const error = await response.text();
            updateStatus(cameraName, `Failed to start stream: ${error}`, 'error');
            return;
        }
        // Point the card's image at the live MJPEG stream.
        const img = document.getElementById(`stream-${cameraName}`);
        img.src = `${API_BASE}/cameras/${cameraName}/stream?t=${Date.now()}`;
        updateStatus(cameraName, 'Stream started successfully', 'success');
    } catch (error) {
        console.error('Error starting stream:', error);
        updateStatus(cameraName, `Error starting stream: ${error.message}`, 'error');
    }
}
// Ask the API to stop streaming `cameraName` and restore the placeholder image.
async function stopStream(cameraName) {
    try {
        updateStatus(cameraName, 'Stopping stream...', 'info');
        const response = await fetch(`${API_BASE}/cameras/${cameraName}/stop-stream`, {
            method: 'POST'
        });
        if (!response.ok) {
            const error = await response.text();
            updateStatus(cameraName, `Failed to stop stream: ${error}`, 'error');
            return;
        }
        // Swap the live stream back to the inline "No Stream" SVG placeholder.
        const placeholder = "data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iNDAwIiBoZWlnaHQ9IjIwMCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIj48cmVjdCB3aWR0aD0iMTAwJSIgaGVpZ2h0PSIxMDAlIiBmaWxsPSIjZGRkIi8+PHRleHQgeD0iNTAlIiB5PSI1MCUiIGZvbnQtZmFtaWx5PSJBcmlhbCIgZm9udC1zaXplPSIxNCIgZmlsbD0iIzk5OSIgdGV4dC1hbmNob3I9Im1pZGRsZSIgZHk9Ii4zZW0iPk5vIFN0cmVhbTwvdGV4dD48L3N2Zz4=";
        document.getElementById(`stream-${cameraName}`).src = placeholder;
        updateStatus(cameraName, 'Stream stopped successfully', 'success');
    } catch (error) {
        console.error('Error stopping stream:', error);
        updateStatus(cameraName, `Error stopping stream: ${error.message}`, 'error');
    }
}
// Force-reload an active stream by re-setting the <img> src with a fresh
// timestamp; reports an error status when no stream is currently shown.
function refreshStream(cameraName) {
    const img = document.getElementById(`stream-${cameraName}`);
    if (!img.src.includes('/stream')) {
        updateStatus(cameraName, 'No active stream to refresh', 'error');
        return;
    }
    img.src = `${API_BASE}/cameras/${cameraName}/stream?t=${Date.now()}`;
    updateStatus(cameraName, 'Stream refreshed', 'info');
}
// Write a status line under a camera card; `type` selects the CSS variant
// (success/error/info) via the `status-${type}` class.
function updateStatus(cameraName, message, type) {
    const statusEl = document.getElementById(`status-${cameraName}`);
    statusEl.className = `status status-${type}`;
    statusEl.textContent = message;
}
// Show a blocking error dialog; used for page-level failures (e.g. camera list fetch).
function showError(message) {
    alert(`Error: ${message}`);
}
// Initialize when page loads
document.addEventListener('DOMContentLoaded', init);
</script>
</body>
</html>

524
streaming-api.http Normal file
View File

@@ -0,0 +1,524 @@
### USDA Vision Camera Streaming API
###
### CONFIGURATION:
### - Production: http://vision:8000 (requires hostname setup)
### - Development: http://localhost:8000
### - Custom: Update @baseUrl below to match your setup
###
### This file contains streaming-specific API endpoints for live camera preview
### Use with VS Code REST Client extension or similar tools.
# Base URL - Update to match your configuration
@baseUrl = http://vision:8000
# Alternative: @baseUrl = http://localhost:8000
### =============================================================================
### STREAMING ENDPOINTS (NEW FUNCTIONALITY)
### =============================================================================
### Start camera streaming for live preview
### This creates a separate camera connection that doesn't interfere with recording
POST {{baseUrl}}/cameras/camera1/start-stream
Content-Type: application/json
### Expected Response:
# {
# "success": true,
# "message": "Started streaming for camera camera1"
# }
###
### Stop camera streaming
POST {{baseUrl}}/cameras/camera1/stop-stream
Content-Type: application/json
### Expected Response:
# {
# "success": true,
# "message": "Stopped streaming for camera camera1"
# }
###
### Get live MJPEG stream (open in browser or use as img src)
### This endpoint returns a continuous MJPEG stream
### Content-Type: multipart/x-mixed-replace; boundary=frame
GET {{baseUrl}}/cameras/camera1/stream
### Usage in HTML:
# <img src="http://localhost:8000/cameras/camera1/stream" alt="Live Stream" />
### Usage in React:
# <img src={`${apiBaseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`} />
###
### Start streaming for camera2
POST {{baseUrl}}/cameras/camera2/start-stream
Content-Type: application/json
###
### Get live stream for camera2
GET {{baseUrl}}/cameras/camera2/stream
###
### Stop streaming for camera2
POST {{baseUrl}}/cameras/camera2/stop-stream
Content-Type: application/json
### =============================================================================
### CONCURRENT OPERATIONS TESTING
### =============================================================================
### Test Scenario: Streaming + Recording Simultaneously
### This demonstrates that streaming doesn't block recording
### Step 1: Start streaming first
POST {{baseUrl}}/cameras/camera1/start-stream
Content-Type: application/json
###
### Step 2: Start recording (while streaming continues)
POST {{baseUrl}}/cameras/camera1/start-recording
Content-Type: application/json
{
"filename": "concurrent_test.avi"
}
###
### Step 3: Check both are running
GET {{baseUrl}}/cameras/camera1
### Expected Response shows both recording and streaming active:
# {
# "camera1": {
# "name": "camera1",
# "status": "connected",
# "is_recording": true,
# "current_recording_file": "concurrent_test.avi",
# "recording_start_time": "2025-01-28T10:30:00.000Z"
# }
# }
###
### Step 4: Stop recording (streaming continues)
POST {{baseUrl}}/cameras/camera1/stop-recording
Content-Type: application/json
###
### Step 5: Verify streaming still works
GET {{baseUrl}}/cameras/camera1/stream
###
### Step 6: Stop streaming
POST {{baseUrl}}/cameras/camera1/stop-stream
Content-Type: application/json
### =============================================================================
### MULTIPLE CAMERA STREAMING
### =============================================================================
### Start streaming on multiple cameras simultaneously
POST {{baseUrl}}/cameras/camera1/start-stream
Content-Type: application/json
###
POST {{baseUrl}}/cameras/camera2/start-stream
Content-Type: application/json
###
### Check status of all cameras
GET {{baseUrl}}/cameras
###
### Access multiple streams (open in separate browser tabs)
GET {{baseUrl}}/cameras/camera1/stream
###
GET {{baseUrl}}/cameras/camera2/stream
###
### Stop all streaming
POST {{baseUrl}}/cameras/camera1/stop-stream
Content-Type: application/json
###
POST {{baseUrl}}/cameras/camera2/stop-stream
Content-Type: application/json
### =============================================================================
### ERROR TESTING
### =============================================================================
### Test with invalid camera name
POST {{baseUrl}}/cameras/invalid_camera/start-stream
Content-Type: application/json
### Expected Response:
# {
# "detail": "Camera streamer not found: invalid_camera"
# }
###
### Test stream endpoint without starting stream first
GET {{baseUrl}}/cameras/camera1/stream
### Expected: May return error or empty stream depending on camera state
###
### Test starting stream when camera is in error state
POST {{baseUrl}}/cameras/camera1/start-stream
Content-Type: application/json
### If camera has issues, expected response:
# {
# "success": false,
# "message": "Failed to start streaming for camera camera1"
# }
### =============================================================================
### INTEGRATION EXAMPLES FOR AI ASSISTANTS
### =============================================================================
### React Component Integration:
# const CameraStream = ({ cameraName }) => {
# const [isStreaming, setIsStreaming] = useState(false);
#
# const startStream = async () => {
# const response = await fetch(`${baseUrl}/cameras/${cameraName}/start-stream`, {
# method: 'POST'
# });
# if (response.ok) {
# setIsStreaming(true);
# }
# };
#
# return (
# <div>
# <button onClick={startStream}>Start Stream</button>
# {isStreaming && (
# <img src={`${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`} />
# )}
# </div>
# );
# };
### JavaScript Fetch Example:
# const streamAPI = {
# async startStream(cameraName) {
# const response = await fetch(`${baseUrl}/cameras/${cameraName}/start-stream`, {
# method: 'POST',
# headers: { 'Content-Type': 'application/json' }
# });
# return response.json();
# },
#
# async stopStream(cameraName) {
# const response = await fetch(`${baseUrl}/cameras/${cameraName}/stop-stream`, {
# method: 'POST',
# headers: { 'Content-Type': 'application/json' }
# });
# return response.json();
# },
#
# getStreamUrl(cameraName) {
# return `${baseUrl}/cameras/${cameraName}/stream?t=${Date.now()}`;
# }
# };
### Vue.js Integration:
# <template>
# <div>
# <button @click="startStream">Start Stream</button>
# <img v-if="isStreaming" :src="streamUrl" />
# </div>
# </template>
#
# <script>
# export default {
# data() {
# return {
# isStreaming: false,
# cameraName: 'camera1'
# };
# },
# computed: {
# streamUrl() {
# return `${this.baseUrl}/cameras/${this.cameraName}/stream?t=${Date.now()}`;
# }
# },
# methods: {
# async startStream() {
# const response = await fetch(`${this.baseUrl}/cameras/${this.cameraName}/start-stream`, {
# method: 'POST'
# });
# if (response.ok) {
# this.isStreaming = true;
# }
# }
# }
# };
# </script>
### =============================================================================
### TROUBLESHOOTING
### =============================================================================
### If streams don't start:
# 1. Check camera status: GET /cameras
# 2. Verify system health: GET /health
# 3. Test camera connection: POST /cameras/{name}/test-connection
# 4. Check if camera is already recording (shouldn't matter, but good to know)
### If stream image doesn't load:
# 1. Verify stream was started: POST /cameras/{name}/start-stream
# 2. Check browser console for CORS errors
# 3. Try accessing stream URL directly in browser
# 4. Add timestamp to prevent caching: ?t=${Date.now()}
### If concurrent operations fail:
# 1. This should work - streaming and recording use separate connections
# 2. Check system logs for resource conflicts
# 3. Verify sufficient system resources (CPU/Memory)
# 4. Test with one camera first, then multiple
### Performance Notes:
# - Streaming uses ~10 FPS by default (configurable)
# - JPEG quality set to 70% (configurable)
# - Each stream uses additional CPU/memory
# - Multiple concurrent streams may impact performance
### =============================================================================
### CAMERA CONFIGURATION ENDPOINTS (NEW)
### =============================================================================
### Get camera configuration
GET {{baseUrl}}/cameras/camera1/config
### Expected Response:
# {
# "name": "camera1",
# "machine_topic": "vibratory_conveyor",
# "storage_path": "/storage/camera1",
# "enabled": true,
# "exposure_ms": 1.0,
# "gain": 3.5,
# "target_fps": 0,
# "sharpness": 120,
# "contrast": 110,
# "saturation": 100,
# "gamma": 100,
# "noise_filter_enabled": true,
# "denoise_3d_enabled": false,
# "auto_white_balance": true,
# "color_temperature_preset": 0,
# "anti_flicker_enabled": true,
# "light_frequency": 1,
# "bit_depth": 8,
# "hdr_enabled": false,
# "hdr_gain_mode": 0
# }
###
### Update basic camera settings (real-time, no restart required)
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"exposure_ms": 2.0,
"gain": 4.0,
"target_fps": 10.0
}
###
### Update image quality settings
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"sharpness": 150,
"contrast": 120,
"saturation": 110,
"gamma": 90
}
###
### Update advanced settings
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"anti_flicker_enabled": true,
"light_frequency": 1,
"auto_white_balance": false,
"color_temperature_preset": 2
}
###
### Enable HDR mode
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"hdr_enabled": true,
"hdr_gain_mode": 1
}
###
### Update noise reduction settings (requires restart)
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"noise_filter_enabled": false,
"denoise_3d_enabled": true
}
###
### Apply configuration (restart camera with new settings)
POST {{baseUrl}}/cameras/camera1/apply-config
### Expected Response:
# {
# "success": true,
# "message": "Configuration applied to camera camera1"
# }
###
### Get camera2 configuration
GET {{baseUrl}}/cameras/camera2/config
###
### Update camera2 for outdoor lighting
PUT {{baseUrl}}/cameras/camera2/config
Content-Type: application/json
{
"exposure_ms": 0.5,
"gain": 2.0,
"sharpness": 130,
"contrast": 115,
"anti_flicker_enabled": true,
"light_frequency": 1
}
### =============================================================================
### CONFIGURATION TESTING SCENARIOS
### =============================================================================
### Scenario 1: Low light optimization
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"exposure_ms": 5.0,
"gain": 8.0,
"noise_filter_enabled": true,
"denoise_3d_enabled": true
}
###
### Scenario 2: High speed capture
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"exposure_ms": 0.2,
"gain": 1.0,
"target_fps": 30.0,
"sharpness": 180
}
###
### Scenario 3: Color accuracy for food inspection
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"auto_white_balance": false,
"color_temperature_preset": 1,
"saturation": 120,
"contrast": 105,
"gamma": 95
}
###
### Scenario 4: HDR for high contrast scenes
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"hdr_enabled": true,
"hdr_gain_mode": 2,
"exposure_ms": 1.0,
"gain": 3.0
}
### =============================================================================
### ERROR TESTING FOR CONFIGURATION
### =============================================================================
### Test invalid camera name
GET {{baseUrl}}/cameras/invalid_camera/config
###
### Test invalid exposure range
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"exposure_ms": 2000.0
}
### Expected: HTTP 422 validation error
###
### Test invalid gain range
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{
"gain": 50.0
}
### Expected: HTTP 422 validation error
###
### Test empty configuration update
PUT {{baseUrl}}/cameras/camera1/config
Content-Type: application/json
{}
### Expected: HTTP 400 "No configuration updates provided"

80
test_frame_conversion.py Normal file
View File

@@ -0,0 +1,80 @@
#!/usr/bin/env python3
"""
Test script to verify the frame conversion fix works correctly.
"""

import sys
import os
import numpy as np

# Add the current directory to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Add camera SDK to path
sys.path.append(os.path.join(os.path.dirname(__file__), "camera_sdk"))

# mvsdk is the vendor camera SDK; exit early if it cannot be imported,
# since the conversion logic under test depends on its ctypes helpers.
try:
    import mvsdk
    print("✅ mvsdk imported successfully")
except ImportError as e:
    print(f"❌ Failed to import mvsdk: {e}")
    sys.exit(1)


def test_frame_conversion():
    """Test the frame conversion logic.

    Builds a synthetic RGB frame in memory, wraps its raw address the same
    way the streamer does, and verifies the buffer-to-ndarray round trip.
    Returns True on success, False on failure.
    """
    print("🧪 Testing frame conversion logic...")

    # Simulate frame data
    width, height = 640, 480
    frame_size = width * height * 3  # RGB

    # Create mock frame data
    mock_frame_data = np.random.randint(0, 255, frame_size, dtype=np.uint8)

    # Create a mock frame buffer (simulate memory address)
    frame_buffer = mock_frame_data.ctypes.data

    # Create mock FrameHead mirroring the fields the conversion reads
    class MockFrameHead:
        def __init__(self):
            self.iWidth = width
            self.iHeight = height
            self.uBytes = frame_size

    frame_head = MockFrameHead()

    try:
        # Test the conversion logic (similar to what's in streamer.py):
        # view the raw address as a ctypes array, then as a numpy array,
        # then reshape into (H, W, 3).
        frame_data_buffer = (mvsdk.c_ubyte * frame_head.uBytes).from_address(frame_buffer)
        frame_data = np.frombuffer(frame_data_buffer, dtype=np.uint8)
        frame = frame_data.reshape((frame_head.iHeight, frame_head.iWidth, 3))

        print(f"✅ Frame conversion successful!")
        print(f" Frame shape: {frame.shape}")
        print(f" Frame dtype: {frame.dtype}")
        print(f" Frame size: {frame.size} bytes")
        return True
    except Exception as e:
        print(f"❌ Frame conversion failed: {e}")
        return False


def main():
    """Run the frame-conversion check and print a human-readable verdict."""
    print("🔧 Frame Conversion Test")
    print("=" * 40)

    success = test_frame_conversion()

    if success:
        print("\n✅ Frame conversion fix is working correctly!")
        print("📋 The streaming issue should be resolved after system restart.")
    else:
        print("\n❌ Frame conversion fix needs more work.")
        print("\n💡 To apply the fix:")
        print("1. Restart the USDA vision system")
        print("2. Test streaming again")


if __name__ == "__main__":
    main()

199
test_streaming.py Normal file
View File

@@ -0,0 +1,199 @@
#!/usr/bin/env python3
"""
Test script for camera streaming functionality.
This script tests the new streaming capabilities without interfering with recording.
"""
import sys
import os
import time
import requests
import threading
from datetime import datetime
# Add the current directory to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
def test_api_endpoints():
    """Test the streaming API endpoints.

    Hits /system/status and /cameras on the local API server, printing a
    pass/fail line per endpoint, then runs the per-camera streaming test
    for every camera returned by the list endpoint.
    """
    base_url = "http://localhost:8000"

    print("🧪 Testing Camera Streaming API Endpoints")
    print("=" * 50)

    # Test system status
    try:
        response = requests.get(f"{base_url}/system/status", timeout=5)
        if response.status_code == 200:
            print("✅ System status endpoint working")
            data = response.json()
            print(f" System: {data.get('status', 'Unknown')}")
            print(f" Camera Manager: {'Running' if data.get('camera_manager_running') else 'Stopped'}")
        else:
            print(f"❌ System status endpoint failed: {response.status_code}")
    except Exception as e:
        print(f"❌ System status endpoint error: {e}")

    # Test camera list
    try:
        response = requests.get(f"{base_url}/cameras", timeout=5)
        if response.status_code == 200:
            print("✅ Camera list endpoint working")
            cameras = response.json()
            print(f" Found {len(cameras)} cameras: {list(cameras.keys())}")

            # Test streaming for each camera
            for camera_name in cameras.keys():
                test_camera_streaming(base_url, camera_name)
        else:
            print(f"❌ Camera list endpoint failed: {response.status_code}")
    except Exception as e:
        print(f"❌ Camera list endpoint error: {e}")
def test_camera_streaming(base_url, camera_name):
    """Test streaming for a specific camera.

    Exercises start-stream, the MJPEG stream endpoint (reads a few chunks
    only), and stop-stream, printing a pass/fail line for each step.
    Returns early if the stream cannot be started.
    """
    print(f"\n🎥 Testing streaming for {camera_name}")
    print("-" * 30)

    # Test start streaming
    try:
        response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
        if response.status_code == 200:
            print(f"✅ Start stream endpoint working for {camera_name}")
            data = response.json()
            print(f" Response: {data.get('message', 'No message')}")
        else:
            print(f"❌ Start stream failed for {camera_name}: {response.status_code}")
            print(f" Error: {response.text}")
            return
    except Exception as e:
        print(f"❌ Start stream error for {camera_name}: {e}")
        return

    # Wait a moment for stream to initialize
    time.sleep(2)

    # Test stream endpoint (just check if it responds)
    try:
        response = requests.get(f"{base_url}/cameras/{camera_name}/stream", timeout=5, stream=True)
        if response.status_code == 200:
            print(f"✅ Stream endpoint responding for {camera_name}")
            print(f" Content-Type: {response.headers.get('content-type', 'Unknown')}")

            # Read a small amount of data to verify it's working
            chunk_count = 0
            for chunk in response.iter_content(chunk_size=1024):
                chunk_count += 1
                if chunk_count >= 3:  # Read a few chunks then stop
                    break
            print(f" Received {chunk_count} data chunks")
        else:
            print(f"❌ Stream endpoint failed for {camera_name}: {response.status_code}")
    except Exception as e:
        print(f"❌ Stream endpoint error for {camera_name}: {e}")

    # Test stop streaming
    try:
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
        if response.status_code == 200:
            print(f"✅ Stop stream endpoint working for {camera_name}")
            data = response.json()
            print(f" Response: {data.get('message', 'No message')}")
        else:
            print(f"❌ Stop stream failed for {camera_name}: {response.status_code}")
    except Exception as e:
        print(f"❌ Stop stream error for {camera_name}: {e}")
def test_concurrent_recording_and_streaming():
    """Test that streaming doesn't interfere with recording.

    Uses the first camera returned by /cameras: starts a stream, starts a
    recording while the stream is live, lets both run for 5 seconds, then
    stops them in reverse order, printing a pass/fail line per step.
    """
    base_url = "http://localhost:8000"

    print("\n🔄 Testing Concurrent Recording and Streaming")
    print("=" * 50)

    try:
        # Get available cameras
        response = requests.get(f"{base_url}/cameras", timeout=5)
        if response.status_code != 200:
            print("❌ Cannot get camera list for concurrent test")
            return

        cameras = response.json()
        if not cameras:
            print("❌ No cameras available for concurrent test")
            return

        camera_name = list(cameras.keys())[0]  # Use first camera
        print(f"Using camera: {camera_name}")

        # Start streaming
        print("1. Starting streaming...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/start-stream", timeout=10)
        if response.status_code != 200:
            print(f"❌ Failed to start streaming: {response.text}")
            return

        time.sleep(2)

        # Start recording
        print("2. Starting recording...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/start-recording",
                                 json={"filename": "test_concurrent_recording.avi"}, timeout=10)
        if response.status_code == 200:
            print("✅ Recording started successfully while streaming")
        else:
            print(f"❌ Failed to start recording while streaming: {response.text}")

        # Let both run for a few seconds
        print("3. Running both streaming and recording for 5 seconds...")
        time.sleep(5)

        # Stop recording
        print("4. Stopping recording...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-recording", timeout=5)
        if response.status_code == 200:
            print("✅ Recording stopped successfully")
        else:
            print(f"❌ Failed to stop recording: {response.text}")

        # Stop streaming
        print("5. Stopping streaming...")
        response = requests.post(f"{base_url}/cameras/{camera_name}/stop-stream", timeout=5)
        if response.status_code == 200:
            print("✅ Streaming stopped successfully")
        else:
            print(f"❌ Failed to stop streaming: {response.text}")

        print("✅ Concurrent test completed successfully!")

    except Exception as e:
        print(f"❌ Concurrent test error: {e}")
def main():
    """Main test function: run the endpoint tests and the concurrency test."""
    print("🚀 USDA Vision Camera Streaming Test")
    print("=" * 50)
    print(f"Test started at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print()

    # Wait for system to be ready
    print("⏳ Waiting for system to be ready...")
    time.sleep(3)

    # Run tests
    test_api_endpoints()
    test_concurrent_recording_and_streaming()

    print("\n" + "=" * 50)
    print("🏁 Test completed!")
    print("\n📋 Next Steps:")
    print("1. Open camera_preview.html in your browser")
    print("2. Click 'Start Stream' for any camera")
    print("3. Verify live preview works without blocking recording")
    print("4. Test concurrent recording and streaming")


if __name__ == "__main__":
    main()

View File

@@ -81,6 +81,74 @@ class StartRecordingRequest(BaseModel):
fps: Optional[float] = Field(default=None, description="Target frames per second") fps: Optional[float] = Field(default=None, description="Target frames per second")
class CameraConfigRequest(BaseModel):
    """Camera configuration update request model.

    Every field is optional; a value of None means "leave this setting
    unchanged". Range constraints are enforced by pydantic, so out-of-range
    values are rejected before the handler runs (HTTP 422).
    """

    # Basic settings
    exposure_ms: Optional[float] = Field(default=None, ge=0.1, le=1000.0, description="Exposure time in milliseconds")
    gain: Optional[float] = Field(default=None, ge=0.0, le=20.0, description="Camera gain value")
    target_fps: Optional[float] = Field(default=None, ge=0.0, le=120.0, description="Target frames per second")

    # Image Quality Settings
    sharpness: Optional[int] = Field(default=None, ge=0, le=200, description="Sharpness (0-200, default 100)")
    contrast: Optional[int] = Field(default=None, ge=0, le=200, description="Contrast (0-200, default 100)")
    saturation: Optional[int] = Field(default=None, ge=0, le=200, description="Saturation (0-200, default 100)")
    gamma: Optional[int] = Field(default=None, ge=0, le=300, description="Gamma (0-300, default 100)")

    # Noise Reduction
    noise_filter_enabled: Optional[bool] = Field(default=None, description="Enable basic noise filtering")
    denoise_3d_enabled: Optional[bool] = Field(default=None, description="Enable advanced 3D denoising")

    # Color Settings (for color cameras)
    auto_white_balance: Optional[bool] = Field(default=None, description="Enable automatic white balance")
    color_temperature_preset: Optional[int] = Field(default=None, ge=0, le=10, description="Color temperature preset")

    # Advanced Settings
    anti_flicker_enabled: Optional[bool] = Field(default=None, description="Reduce artificial lighting flicker")
    light_frequency: Optional[int] = Field(default=None, ge=0, le=1, description="Light frequency (0=50Hz, 1=60Hz)")

    # HDR Settings
    hdr_enabled: Optional[bool] = Field(default=None, description="Enable High Dynamic Range")
    hdr_gain_mode: Optional[int] = Field(default=None, ge=0, le=3, description="HDR processing mode")
class CameraConfigResponse(BaseModel):
    """Camera configuration response model.

    Mirrors the persisted camera configuration. Unlike CameraConfigRequest,
    all fields are required here, and the read-only ``bit_depth`` is included.
    """

    name: str            # logical camera name used in API paths
    machine_topic: str   # MQTT topic of the machine this camera watches
    storage_path: str    # directory where recordings are written
    enabled: bool

    # Basic settings
    exposure_ms: float
    gain: float
    target_fps: float

    # Image Quality Settings
    sharpness: int
    contrast: int
    saturation: int
    gamma: int

    # Noise Reduction
    noise_filter_enabled: bool
    denoise_3d_enabled: bool

    # Color Settings
    auto_white_balance: bool
    color_temperature_preset: int

    # Advanced Settings
    anti_flicker_enabled: bool
    light_frequency: int
    bit_depth: int  # reported only; not updatable through CameraConfigRequest

    # HDR Settings
    hdr_enabled: bool
    hdr_gain_mode: int
class StartRecordingResponse(BaseModel): class StartRecordingResponse(BaseModel):
"""Start recording response model""" """Start recording response model"""

View File

@@ -13,7 +13,7 @@ import threading
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Depends, Query from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect, Depends, Query
from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse from fastapi.responses import JSONResponse, StreamingResponse
import uvicorn import uvicorn
from ..core.config import Config from ..core.config import Config
@@ -243,6 +243,149 @@ class APIServer:
self.logger.error(f"Error testing camera connection: {e}") self.logger.error(f"Error testing camera connection: {e}")
raise HTTPException(status_code=500, detail=str(e)) raise HTTPException(status_code=500, detail=str(e))
@self.app.get("/cameras/{camera_name}/stream")
async def camera_stream(camera_name: str):
    """Get live MJPEG stream from camera.

    Starts the camera's streamer on demand if it is not already active,
    then returns a multipart MJPEG response driven by the streamer's
    frame generator.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 404 if
            the camera is unknown, 500 if streaming cannot be started.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        # Get camera streamer
        streamer = self.camera_manager.get_camera_streamer(camera_name)
        if not streamer:
            raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found")

        # Start streaming if not already active
        if not streamer.is_streaming():
            success = streamer.start_streaming()
            if not success:
                raise HTTPException(status_code=500, detail=f"Failed to start streaming for camera {camera_name}")

        # Return MJPEG stream.
        # NOTE(review): streaming is not stopped automatically when the client
        # disconnects — the generator just ends; confirm clients call
        # /stop-stream or that idle streams are reaped elsewhere.
        return StreamingResponse(streamer.get_frame_generator(), media_type="multipart/x-mixed-replace; boundary=frame")

    except HTTPException:
        raise
    except Exception as e:
        self.logger.error(f"Error starting camera stream: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.post("/cameras/{camera_name}/start-stream")
async def start_camera_stream(camera_name: str):
    """Start streaming for a camera.

    Returns a JSON payload with ``success`` and a human-readable ``message``.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 500 on
            unexpected errors.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        success = self.camera_manager.start_camera_streaming(camera_name)

        if success:
            return {"success": True, "message": f"Started streaming for camera {camera_name}"}
        else:
            return {"success": False, "message": f"Failed to start streaming for camera {camera_name}"}
    except HTTPException:
        # Fix: without this re-raise the 503 above was caught by the generic
        # handler below and misreported as a 500 (matches the pattern used by
        # the /stream and /config endpoints).
        raise
    except Exception as e:
        self.logger.error(f"Error starting camera stream: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.post("/cameras/{camera_name}/stop-stream")
async def stop_camera_stream(camera_name: str):
    """Stop streaming for a camera.

    Returns a JSON payload with ``success`` and a human-readable ``message``.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 500 on
            unexpected errors.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        success = self.camera_manager.stop_camera_streaming(camera_name)

        if success:
            return {"success": True, "message": f"Stopped streaming for camera {camera_name}"}
        else:
            return {"success": False, "message": f"Failed to stop streaming for camera {camera_name}"}
    except HTTPException:
        # Fix: without this re-raise the 503 above was caught by the generic
        # handler below and misreported as a 500.
        raise
    except Exception as e:
        self.logger.error(f"Error stopping camera stream: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.get("/cameras/{camera_name}/config", response_model=CameraConfigResponse)
async def get_camera_config(camera_name: str):
    """Get camera configuration.

    Returns the persisted configuration for one camera as a
    CameraConfigResponse.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 404 if
            the camera is unknown, 500 on unexpected errors.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        config = self.camera_manager.get_camera_config(camera_name)
        if not config:
            raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found")

        # Map the internal config object field-by-field onto the response model
        return CameraConfigResponse(
            name=config.name,
            machine_topic=config.machine_topic,
            storage_path=config.storage_path,
            enabled=config.enabled,
            exposure_ms=config.exposure_ms,
            gain=config.gain,
            target_fps=config.target_fps,
            sharpness=config.sharpness,
            contrast=config.contrast,
            saturation=config.saturation,
            gamma=config.gamma,
            noise_filter_enabled=config.noise_filter_enabled,
            denoise_3d_enabled=config.denoise_3d_enabled,
            auto_white_balance=config.auto_white_balance,
            color_temperature_preset=config.color_temperature_preset,
            anti_flicker_enabled=config.anti_flicker_enabled,
            light_frequency=config.light_frequency,
            bit_depth=config.bit_depth,
            hdr_enabled=config.hdr_enabled,
            hdr_gain_mode=config.hdr_gain_mode,
        )

    except HTTPException:
        raise
    except Exception as e:
        self.logger.error(f"Error getting camera config: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.put("/cameras/{camera_name}/config")
async def update_camera_config(camera_name: str, request: CameraConfigRequest):
    """Update camera configuration.

    Applies only the fields that were explicitly provided (non-None) and
    returns the list of updated setting names.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 400 if the
            request carries no updates, 404 if the camera is unknown or the
            update fails, 500 on unexpected errors.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        # Convert request to dict, excluding None values (None = "leave unchanged")
        config_updates = {k: v for k, v in request.dict().items() if v is not None}

        if not config_updates:
            raise HTTPException(status_code=400, detail="No configuration updates provided")

        success = self.camera_manager.update_camera_config(camera_name, **config_updates)

        if success:
            return {"success": True, "message": f"Camera {camera_name} configuration updated", "updated_settings": list(config_updates.keys())}
        else:
            raise HTTPException(status_code=404, detail=f"Camera {camera_name} not found or update failed")

    except HTTPException:
        raise
    except Exception as e:
        self.logger.error(f"Error updating camera config: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.post("/cameras/{camera_name}/apply-config")
async def apply_camera_config(camera_name: str):
    """Apply current configuration to active camera (requires camera restart).

    Returns a JSON payload with ``success`` and a human-readable ``message``.

    Raises:
        HTTPException: 503 if the camera manager is unavailable, 500 on
            unexpected errors.
    """
    try:
        if not self.camera_manager:
            raise HTTPException(status_code=503, detail="Camera manager not available")

        success = self.camera_manager.apply_camera_config(camera_name)

        if success:
            return {"success": True, "message": f"Configuration applied to camera {camera_name}"}
        else:
            return {"success": False, "message": f"Failed to apply configuration to camera {camera_name}"}
    except HTTPException:
        # Fix: without this re-raise the 503 above was caught by the generic
        # handler below and misreported as a 500.
        raise
    except Exception as e:
        self.logger.error(f"Error applying camera config: {e}")
        raise HTTPException(status_code=500, detail=str(e))
@self.app.post("/cameras/{camera_name}/reconnect", response_model=CameraRecoveryResponse) @self.app.post("/cameras/{camera_name}/reconnect", response_model=CameraRecoveryResponse)
async def reconnect_camera(camera_name: str): async def reconnect_camera(camera_name: str):
"""Reconnect to a camera""" """Reconnect to a camera"""

View File

@@ -8,5 +8,6 @@ using the camera SDK library (mvsdk).
from .manager import CameraManager from .manager import CameraManager
from .recorder import CameraRecorder from .recorder import CameraRecorder
from .monitor import CameraMonitor from .monitor import CameraMonitor
from .streamer import CameraStreamer
__all__ = ["CameraManager", "CameraRecorder", "CameraMonitor"] __all__ = ["CameraManager", "CameraRecorder", "CameraMonitor", "CameraStreamer"]

View File

@@ -22,6 +22,7 @@ from ..core.events import EventSystem, EventType, Event, publish_camera_status_c
from ..core.timezone_utils import format_filename_timestamp from ..core.timezone_utils import format_filename_timestamp
from .recorder import CameraRecorder from .recorder import CameraRecorder
from .monitor import CameraMonitor from .monitor import CameraMonitor
from .streamer import CameraStreamer
from .sdk_config import initialize_sdk_with_suppression from .sdk_config import initialize_sdk_with_suppression
@@ -40,6 +41,7 @@ class CameraManager:
# Camera management # Camera management
self.available_cameras: List[Any] = [] # mvsdk camera device info self.available_cameras: List[Any] = [] # mvsdk camera device info
self.camera_recorders: Dict[str, CameraRecorder] = {} # camera_name -> recorder self.camera_recorders: Dict[str, CameraRecorder] = {} # camera_name -> recorder
self.camera_streamers: Dict[str, CameraStreamer] = {} # camera_name -> streamer
self.camera_monitor: Optional[CameraMonitor] = None self.camera_monitor: Optional[CameraMonitor] = None
# Threading # Threading
@@ -71,6 +73,9 @@ class CameraManager:
# Initialize camera recorders # Initialize camera recorders
self._initialize_recorders() self._initialize_recorders()
# Initialize camera streamers
self._initialize_streamers()
self.logger.info("Camera manager started successfully") self.logger.info("Camera manager started successfully")
return True return True
@@ -93,6 +98,12 @@ class CameraManager:
recorder.stop_recording() recorder.stop_recording()
recorder.cleanup() recorder.cleanup()
# Stop all active streaming
with self._lock:
for streamer in self.camera_streamers.values():
if streamer.is_streaming():
streamer.stop_streaming()
self.logger.info("Camera manager stopped") self.logger.info("Camera manager stopped")
def _discover_cameras(self) -> None: def _discover_cameras(self) -> None:
@@ -427,3 +438,104 @@ class CameraManager:
self.logger.error(f"Error reinitializing camera {camera_name}: {e}") self.logger.error(f"Error reinitializing camera {camera_name}: {e}")
self.state_manager.update_camera_status(name=camera_name, status="error", device_info={"error": str(e)}) self.state_manager.update_camera_status(name=camera_name, status="error", device_info={"error": str(e)})
return False return False
def _initialize_streamers(self) -> None:
    """Initialize camera streamers for configured cameras.

    For every enabled camera config, locates the matching physical device
    and registers a CameraStreamer under the camera's name. Cameras with
    no physical device are skipped with a warning; per-camera errors are
    logged and do not abort the loop. Runs under the manager lock.
    """
    with self._lock:
        for camera_config in self.config.cameras:
            if not camera_config.enabled:
                continue

            try:
                # Find matching physical camera
                device_info = self._find_camera_device(camera_config.name)
                if device_info is None:
                    self.logger.warning(f"No physical camera found for streaming: {camera_config.name}")
                    continue

                # Create streamer
                streamer = CameraStreamer(camera_config=camera_config, device_info=device_info, state_manager=self.state_manager, event_system=self.event_system)

                # Add streamer to the list
                self.camera_streamers[camera_config.name] = streamer
                self.logger.info(f"Successfully created streamer for camera: {camera_config.name}")

            except Exception as e:
                self.logger.error(f"Error initializing streamer for {camera_config.name}: {e}")
def get_camera_streamer(self, camera_name: str) -> Optional[CameraStreamer]:
    """Get camera streamer for a specific camera, or None if not registered."""
    return self.camera_streamers.get(camera_name)
def start_camera_streaming(self, camera_name: str) -> bool:
    """Start live preview streaming for one camera.

    Returns True when the streamer reports success, False when the camera
    has no registered streamer or the streamer fails to start.
    """
    try:
        streamer = self.camera_streamers[camera_name]
    except KeyError:
        self.logger.error(f"Camera streamer not found: {camera_name}")
        return False
    return streamer.start_streaming()
def stop_camera_streaming(self, camera_name: str) -> bool:
    """Stop live preview streaming for one camera.

    Returns True when the streamer reports success, False when the camera
    has no registered streamer or the streamer fails to stop.
    """
    try:
        streamer = self.camera_streamers[camera_name]
    except KeyError:
        self.logger.error(f"Camera streamer not found: {camera_name}")
        return False
    return streamer.stop_streaming()
def is_camera_streaming(self, camera_name: str) -> bool:
    """Return True only when a streamer exists for this camera and is active."""
    streamer = self.camera_streamers.get(camera_name)
    return streamer is not None and streamer.is_streaming()
def get_camera_config(self, camera_name: str) -> Optional[CameraConfig]:
    """Get camera configuration by name; None if no such camera is configured."""
    return self.config.get_camera_by_name(camera_name)
def update_camera_config(self, camera_name: str, **kwargs) -> bool:
    """Update camera configuration and save to config file.

    Delegates the actual update/persist to the config object; logs the
    outcome and returns True on success, False on failure or exception.
    """
    try:
        if self.config.update_camera_config(camera_name, **kwargs):
            self.logger.info(f"Updated configuration for camera {camera_name}: {kwargs}")
            return True
        self.logger.error(f"Failed to update configuration for camera {camera_name}")
        return False
    except Exception as e:
        self.logger.error(f"Error updating camera configuration: {e}")
        return False
def apply_camera_config(self, camera_name: str) -> bool:
    """Apply current configuration to active camera (requires camera restart).

    Stops any active recording on the camera, then reinitializes the camera
    so the stored configuration is re-applied during initialization.

    Note: a recording interrupted by this call is NOT restarted
    automatically — the original filename is no longer available here.
    A warning is logged so callers know to restart recording explicitly.

    Returns:
        True if the camera was reinitialized successfully, False otherwise.
    """
    try:
        # Get the recorder for this camera
        recorder = self.camera_recorders.get(camera_name)
        if not recorder:
            self.logger.error(f"Camera recorder not found: {camera_name}")
            return False

        # Stop recording if active
        was_recording = recorder.is_recording()
        if was_recording:
            recorder.stop_recording()

        # Reinitialize the camera with new settings
        success = self.reinitialize_failed_camera(camera_name)

        # Fix: `was_recording` was previously captured but never used, so an
        # interrupted recording was dropped silently. Surface it to operators.
        if was_recording:
            self.logger.warning(f"Recording on camera {camera_name} was stopped to apply configuration and must be restarted manually")

        if success:
            self.logger.info(f"Successfully applied configuration to camera {camera_name}")
            return True
        else:
            self.logger.error(f"Failed to apply configuration to camera {camera_name}")
            return False

    except Exception as e:
        self.logger.error(f"Error applying camera configuration: {e}")
        return False

View File

@@ -328,6 +328,117 @@ class CameraRecorder:
self.logger.error(f"Error updating camera settings: {e}") self.logger.error(f"Error updating camera settings: {e}")
return False return False
def update_advanced_camera_settings(self, **kwargs) -> bool:
    """Update advanced camera settings dynamically.

    Applies each supported keyword argument to the live camera handle via
    the SDK and mirrors the applied value into ``self.camera_config``.
    Arguments with value ``None`` are skipped ("leave unchanged").

    Returns:
        True if at least one setting was applied, False if nothing changed
        or an SDK call raised.
    """
    if not self.hCamera:
        self.logger.error("Camera not initialized")
        return False

    try:
        settings_updated = False

        # Update basic settings
        if "exposure_ms" in kwargs and kwargs["exposure_ms"] is not None:
            # Disable auto-exposure before setting a manual exposure time
            mvsdk.CameraSetAeState(self.hCamera, 0)
            exposure_us = int(kwargs["exposure_ms"] * 1000)  # SDK expects microseconds
            mvsdk.CameraSetExposureTime(self.hCamera, exposure_us)
            self.camera_config.exposure_ms = kwargs["exposure_ms"]
            settings_updated = True

        if "gain" in kwargs and kwargs["gain"] is not None:
            gain_value = int(kwargs["gain"] * 100)  # SDK gain is scaled by 100
            mvsdk.CameraSetAnalogGain(self.hCamera, gain_value)
            self.camera_config.gain = kwargs["gain"]
            settings_updated = True

        if "target_fps" in kwargs and kwargs["target_fps"] is not None:
            # No SDK call here: FPS is stored in config only
            self.camera_config.target_fps = kwargs["target_fps"]
            settings_updated = True

        # Update image quality settings
        if "sharpness" in kwargs and kwargs["sharpness"] is not None:
            mvsdk.CameraSetSharpness(self.hCamera, kwargs["sharpness"])
            self.camera_config.sharpness = kwargs["sharpness"]
            settings_updated = True

        if "contrast" in kwargs and kwargs["contrast"] is not None:
            mvsdk.CameraSetContrast(self.hCamera, kwargs["contrast"])
            self.camera_config.contrast = kwargs["contrast"]
            settings_updated = True

        if "gamma" in kwargs and kwargs["gamma"] is not None:
            mvsdk.CameraSetGamma(self.hCamera, kwargs["gamma"])
            self.camera_config.gamma = kwargs["gamma"]
            settings_updated = True

        if "saturation" in kwargs and kwargs["saturation"] is not None and not self.monoCamera:
            # Saturation is only applied on color sensors
            mvsdk.CameraSetSaturation(self.hCamera, kwargs["saturation"])
            self.camera_config.saturation = kwargs["saturation"]
            settings_updated = True

        # Update noise reduction settings
        if "noise_filter_enabled" in kwargs and kwargs["noise_filter_enabled"] is not None:
            # Note: Noise filter settings may require camera restart to take effect
            self.camera_config.noise_filter_enabled = kwargs["noise_filter_enabled"]
            settings_updated = True

        if "denoise_3d_enabled" in kwargs and kwargs["denoise_3d_enabled"] is not None:
            # Note: 3D denoise settings may require camera restart to take effect
            self.camera_config.denoise_3d_enabled = kwargs["denoise_3d_enabled"]
            settings_updated = True

        # Update color settings (for color cameras)
        if not self.monoCamera:
            if "auto_white_balance" in kwargs and kwargs["auto_white_balance"] is not None:
                mvsdk.CameraSetWbMode(self.hCamera, kwargs["auto_white_balance"])
                self.camera_config.auto_white_balance = kwargs["auto_white_balance"]
                settings_updated = True

            if "color_temperature_preset" in kwargs and kwargs["color_temperature_preset"] is not None:
                # Preset only takes effect under manual white balance; the config
                # value is stored either way
                if not self.camera_config.auto_white_balance:
                    mvsdk.CameraSetPresetClrTemp(self.hCamera, kwargs["color_temperature_preset"])
                self.camera_config.color_temperature_preset = kwargs["color_temperature_preset"]
                settings_updated = True

        # Update advanced settings
        if "anti_flicker_enabled" in kwargs and kwargs["anti_flicker_enabled"] is not None:
            mvsdk.CameraSetAntiFlick(self.hCamera, kwargs["anti_flicker_enabled"])
            self.camera_config.anti_flicker_enabled = kwargs["anti_flicker_enabled"]
            settings_updated = True

        if "light_frequency" in kwargs and kwargs["light_frequency"] is not None:
            mvsdk.CameraSetLightFrequency(self.hCamera, kwargs["light_frequency"])
            self.camera_config.light_frequency = kwargs["light_frequency"]
            settings_updated = True

        # Update HDR settings (if supported)
        if "hdr_enabled" in kwargs and kwargs["hdr_enabled"] is not None:
            try:
                mvsdk.CameraSetHDR(self.hCamera, 1 if kwargs["hdr_enabled"] else 0)
                self.camera_config.hdr_enabled = kwargs["hdr_enabled"]
                settings_updated = True
            except AttributeError:
                # Older SDK builds do not expose the HDR entry points
                self.logger.warning("HDR functions not available in this SDK version")

        if "hdr_gain_mode" in kwargs and kwargs["hdr_gain_mode"] is not None:
            try:
                # Gain mode is only pushed to the SDK while HDR is enabled
                if self.camera_config.hdr_enabled:
                    mvsdk.CameraSetHDRGainMode(self.hCamera, kwargs["hdr_gain_mode"])
                self.camera_config.hdr_gain_mode = kwargs["hdr_gain_mode"]
                settings_updated = True
            except AttributeError:
                self.logger.warning("HDR gain mode functions not available in this SDK version")

        if settings_updated:
            # NOTE(review): this logs every non-None kwarg, including ones that
            # were skipped above (e.g. saturation on a mono camera) — confirm
            # whether the log should list only the settings actually applied.
            updated_settings = [k for k, v in kwargs.items() if v is not None]
            self.logger.info(f"Updated camera settings: {updated_settings}")

        return settings_updated

    except Exception as e:
        self.logger.error(f"Error updating advanced camera settings: {e}")
        return False
def start_recording(self, filename: str) -> bool: def start_recording(self, filename: str) -> bool:
"""Start video recording""" """Start video recording"""
with self._lock: with self._lock:

View File

@@ -0,0 +1,320 @@
"""
Camera Streamer for the USDA Vision Camera System.
This module provides live preview streaming from GigE cameras without blocking recording.
It creates a separate camera connection for streaming that doesn't interfere with recording.
"""
import sys
import os
import threading
import time
import logging
import cv2
import numpy as np
import contextlib
from typing import Optional, Dict, Any, Generator
from datetime import datetime
import queue
# Add camera SDK to path
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "camera_sdk"))
import mvsdk
from ..core.config import CameraConfig
from ..core.state_manager import StateManager
from ..core.events import EventSystem
from .sdk_config import ensure_sdk_initialized
@contextlib.contextmanager
def suppress_camera_errors():
    """Context manager to temporarily suppress camera SDK error output.

    Duplicates file descriptors 1 (stdout) and 2 (stderr), points both at
    os.devnull for the duration of the with-block, and restores the saved
    descriptors afterwards even if the block raises.
    """
    # Save originals keyed by fd number so restore/close is a single loop
    saved_fds = {1: os.dup(1), 2: os.dup(2)}
    try:
        devnull_fd = os.open(os.devnull, os.O_WRONLY)
        try:
            # Redirect both streams (the SDK may write to either)
            os.dup2(devnull_fd, 2)
            os.dup2(devnull_fd, 1)
        finally:
            os.close(devnull_fd)
        yield
    finally:
        for fd, backup in saved_fds.items():
            os.dup2(backup, fd)
            os.close(backup)
class CameraStreamer:
"""Provides live preview streaming from cameras without blocking recording"""
def __init__(self, camera_config: CameraConfig, device_info: Any, state_manager: StateManager, event_system: EventSystem):
    """Create a streamer for one camera.

    Args:
        camera_config: Static configuration for this camera.
        device_info: mvsdk device descriptor used to open the camera.
        state_manager: Shared system state tracker.
        event_system: Event bus for publishing camera events.
    """
    self.camera_config = camera_config
    self.device_info = device_info
    self.state_manager = state_manager
    self.event_system = event_system
    self.logger = logging.getLogger(f"{__name__}.{camera_config.name}")

    # Camera handle and properties (separate from recorder)
    self.hCamera: Optional[int] = None
    self.cap = None              # SDK capability struct, filled in _initialize_camera
    self.monoCamera = False      # True when the sensor reports monochrome
    self.frame_buffer = None     # aligned raw-frame buffer allocated via the SDK
    self.frame_buffer_size = 0

    # Streaming state
    self.streaming = False
    self._streaming_thread: Optional[threading.Thread] = None
    self._stop_streaming_event = threading.Event()
    self._frame_queue = queue.Queue(maxsize=5)  # Buffer for latest frames
    self._lock = threading.RLock()

    # Stream settings (optimized for preview)
    self.preview_fps = 10.0  # Lower FPS for preview to reduce load
    self.preview_quality = 70  # JPEG quality for streaming
def start_streaming(self) -> bool:
    """Begin producing preview frames on a background thread.

    Idempotent: returns True immediately if streaming is already active.
    Returns False when camera initialization or thread startup fails;
    camera resources are released on failure.
    """
    with self._lock:
        if self.streaming:
            self.logger.warning("Streaming already active")
            return True

        try:
            # Open a dedicated camera handle for preview
            if not self._initialize_camera():
                return False

            # Launch the capture loop as a daemon worker
            self._stop_streaming_event.clear()
            worker = threading.Thread(target=self._streaming_loop, daemon=True)
            self._streaming_thread = worker
            worker.start()

            self.streaming = True
            self.logger.info(f"Started streaming for camera: {self.camera_config.name}")
            return True

        except Exception as e:
            self.logger.error(f"Error starting streaming: {e}")
            self._cleanup_camera()
            return False
def stop_streaming(self) -> bool:
    """Stop the preview stream and release the streaming camera handle.

    Idempotent: returns True immediately when streaming is not active.
    Waits up to 5 seconds for the capture thread to exit before cleanup.
    """
    with self._lock:
        if not self.streaming:
            return True

        try:
            # Ask the capture loop to exit
            self._stop_streaming_event.set()

            worker = self._streaming_thread
            if worker is not None and worker.is_alive():
                worker.join(timeout=5.0)

            # Release the streaming-side camera resources
            self._cleanup_camera()

            self.streaming = False
            self.logger.info(f"Stopped streaming for camera: {self.camera_config.name}")
            return True

        except Exception as e:
            self.logger.error(f"Error stopping streaming: {e}")
            return False
def get_latest_frame(self) -> Optional[bytes]:
    """Return the newest preview frame encoded as JPEG bytes.

    Non-blocking: returns None when no frame is queued or encoding fails.
    """
    try:
        frame = self._frame_queue.get_nowait()
    except queue.Empty:
        # No frame has been produced since the last call
        return None

    try:
        encoded = cv2.imencode(".jpg", frame, [cv2.IMWRITE_JPEG_QUALITY, self.preview_quality])[1]
        return encoded.tobytes()
    except Exception as e:
        self.logger.error(f"Error getting latest frame: {e}")
        return None
def get_frame_generator(self) -> Generator[bytes, None, None]:
    """Yield multipart MJPEG chunks for as long as streaming is active.

    Each chunk is a `--frame` boundary plus a JPEG payload, suitable
    for a `multipart/x-mixed-replace` HTTP response. When no frame is
    available yet, backs off briefly instead of busy-waiting.
    """
    part_header = b"--frame\r\nContent-Type: image/jpeg\r\n\r\n"
    while self.streaming:
        jpeg = self.get_latest_frame()
        if not jpeg:
            time.sleep(0.1)  # nothing buffered yet; avoid spinning
            continue
        yield part_header + jpeg + b"\r\n"
def _initialize_camera(self) -> bool:
    """Initialize camera for streaming (separate from recording).

    Opens the device through the MVCAM SDK, selects the ISP output
    format based on whether the sensor is monochrome, applies
    preview-oriented settings, allocates an aligned frame buffer sized
    for the sensor's maximum resolution, and starts acquisition.

    Returns:
        True when the camera is started and ready to deliver frames;
        False on any failure (partially acquired resources are released
        via _cleanup_camera before returning).
    """
    try:
        self.logger.info(f"Initializing camera for streaming: {self.camera_config.name}")
        # Ensure SDK is initialized
        ensure_sdk_initialized()
        # Check if device_info is valid
        if self.device_info is None:
            self.logger.error("No device info provided for camera initialization")
            return False
        # Initialize camera (suppress output to avoid MVCAMAPI error messages)
        with suppress_camera_errors():
            self.hCamera = mvsdk.CameraInit(self.device_info, -1, -1)
        self.logger.info("Camera initialized successfully for streaming")
        # Get camera capabilities
        self.cap = mvsdk.CameraGetCapability(self.hCamera)
        # Determine if camera is monochrome
        self.monoCamera = self.cap.sIspCapacity.bMonoSensor != 0
        # Set output format based on camera type and bit depth
        if self.monoCamera:
            mvsdk.CameraSetIspOutFormat(self.hCamera, mvsdk.CAMERA_MEDIA_TYPE_MONO8)
        else:
            mvsdk.CameraSetIspOutFormat(self.hCamera, mvsdk.CAMERA_MEDIA_TYPE_BGR8)
        # Configure camera settings for streaming (optimized for preview)
        self._configure_streaming_settings()
        # Allocate frame buffer sized for the sensor's maximum resolution
        # so any configured resolution fits; allocated 16-byte aligned via
        # the SDK allocator (must be freed with CameraAlignFree).
        bytes_per_pixel = 1 if self.monoCamera else 3
        self.frame_buffer_size = self.cap.sResolutionRange.iWidthMax * self.cap.sResolutionRange.iHeightMax * bytes_per_pixel
        self.frame_buffer = mvsdk.CameraAlignMalloc(self.frame_buffer_size, 16)
        # Start camera acquisition
        mvsdk.CameraPlay(self.hCamera)
        self.logger.info("Camera started successfully for streaming")
        return True
    except Exception as e:
        self.logger.error(f"Error initializing camera for streaming: {e}")
        self._cleanup_camera()
        return False
def _configure_streaming_settings(self):
    """Configure camera settings optimized for streaming.

    Applies free-run trigger mode, exposure, gain, and frame speed from
    the camera config. Failures here are logged as warnings and do not
    abort initialization — the camera streams with whatever settings
    did apply.
    """
    try:
        # Set trigger mode to free run for continuous streaming
        mvsdk.CameraSetTriggerMode(self.hCamera, 0)
        # Set exposure (config value is milliseconds; SDK call takes microseconds)
        exposure_us = int(self.camera_config.exposure_ms * 1000)
        mvsdk.CameraSetExposureTime(self.hCamera, exposure_us)
        # Set gain
        mvsdk.CameraSetAnalogGain(self.hCamera, int(self.camera_config.gain))
        # Set frame rate for streaming (lower than recording)
        # NOTE(review): CameraSetFrameSpeed typically takes a discrete speed
        # index (e.g. 0=low/1=normal/2=high), not an fps value — confirm that
        # passing preview_fps (10) here is intentional and not a misuse.
        if hasattr(mvsdk, "CameraSetFrameSpeed"):
            mvsdk.CameraSetFrameSpeed(self.hCamera, int(self.preview_fps))
        self.logger.info(f"Streaming settings configured: exposure={self.camera_config.exposure_ms}ms, gain={self.camera_config.gain}, fps={self.preview_fps}")
    except Exception as e:
        self.logger.warning(f"Could not configure some streaming settings: {e}")
def _streaming_loop(self):
    """Main streaming loop that captures frames continuously.

    Runs on the background thread started by start_streaming() until
    the stop event is set. Each iteration grabs a raw frame from the
    SDK, converts it to BGR, and pushes it into the bounded preview
    queue (dropping the oldest frame when full). The SDK image buffer
    is released in a finally block so a failure in processing or
    conversion cannot leak it.
    """
    self.logger.info("Starting streaming loop")
    try:
        while not self._stop_streaming_event.is_set():
            try:
                # Capture frame with 200ms timeout
                pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 200)
                try:
                    # Process raw data into the preallocated frame buffer
                    mvsdk.CameraImageProcess(self.hCamera, pRawData, self.frame_buffer, FrameHead)
                    # Convert to OpenCV format
                    frame = self._convert_frame_to_opencv(FrameHead)
                    if frame is not None:
                        # Add frame to queue (replace oldest if queue is full)
                        try:
                            self._frame_queue.put_nowait(frame)
                        except queue.Full:
                            try:
                                self._frame_queue.get_nowait()
                                self._frame_queue.put_nowait(frame)
                            except queue.Empty:
                                pass
                finally:
                    # Always return the SDK buffer, even if processing or
                    # conversion raised — otherwise the buffer leaks.
                    mvsdk.CameraReleaseImageBuffer(self.hCamera, pRawData)
                # Pace the loop to the preview frame rate
                time.sleep(1.0 / self.preview_fps)
            except Exception as e:
                # Timeouts and transient grab errors land here; only log
                # them if we are not already shutting down.
                if not self._stop_streaming_event.is_set():
                    self.logger.error(f"Error in streaming loop: {e}")
                    time.sleep(0.1)  # Brief pause before retrying
    except Exception as e:
        self.logger.error(f"Fatal error in streaming loop: {e}")
    finally:
        self.logger.info("Streaming loop ended")
def _convert_frame_to_opencv(self, FrameHead) -> Optional[np.ndarray]:
    """Wrap the processed SDK frame buffer in an OpenCV BGR ndarray.

    Exposes the SDK-owned buffer at self.frame_buffer as a ctypes array
    so numpy can read it, then reshapes according to the frame header.
    Monochrome frames are expanded to 3 channels for a consistent
    downstream format.

    Returns:
        An (H, W, 3) uint8 BGR array, or None if conversion fails.
    """
    try:
        # View the raw buffer memory as a ctypes byte array numpy understands.
        raw = (mvsdk.c_ubyte * FrameHead.uBytes).from_address(self.frame_buffer)
        pixels = np.frombuffer(raw, dtype=np.uint8)
        if self.monoCamera:
            gray = pixels.reshape((FrameHead.iHeight, FrameHead.iWidth))
            # Expand to 3 channels so callers always get BGR.
            return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
        # Color camera already delivers interleaved BGR.
        return pixels.reshape((FrameHead.iHeight, FrameHead.iWidth, 3))
    except Exception as e:
        self.logger.error(f"Error converting frame: {e}")
        return None
def _cleanup_camera(self):
    """Clean up camera resources (frame buffer and camera handle).

    Each resource is released in its own try block so that a failure
    freeing the frame buffer cannot skip CameraUnInit and leak the
    camera handle (the original shared-try version had that flaw).
    Never raises; errors are logged.
    """
    if self.frame_buffer:
        try:
            mvsdk.CameraAlignFree(self.frame_buffer)
        except Exception as e:
            self.logger.error(f"Error cleaning up camera resources: {e}")
        finally:
            # Drop the reference even on failure so we never double-free.
            self.frame_buffer = None
    if self.hCamera is not None:
        try:
            mvsdk.CameraUnInit(self.hCamera)
        except Exception as e:
            self.logger.error(f"Error cleaning up camera resources: {e}")
        finally:
            self.hCamera = None
    self.logger.info("Camera resources cleaned up for streaming")
def is_streaming(self) -> bool:
    """Report whether the live-preview streaming loop is currently active."""
    return self.streaming
def __del__(self):
    """Destructor to ensure streaming is stopped and resources released.

    Uses getattr because __del__ can run on a partially constructed
    instance (e.g. when __init__ raised before self.streaming was
    assigned), where a bare attribute access would raise AttributeError
    during interpreter teardown.
    """
    if getattr(self, "streaming", False):
        self.stop_streaming()