RTSP streaming fully implemented (recorder/streamer camera sharing)

This commit is contained in:
salirezav
2025-11-01 14:58:25 -04:00
parent 43e1dace8c
commit 1a8aa8a027
5 changed files with 204 additions and 23 deletions

View File

@@ -487,17 +487,19 @@ class CameraManager:
self.logger.warning(f"No physical camera found for streaming: {camera_config.name}")
continue
# Create streamer
streamer = CameraStreamer(camera_config=camera_config, device_info=device_info, state_manager=self.state_manager, event_system=self.event_system)
# Get recorder reference (for bidirectional sharing)
recorder = self.camera_recorders.get(camera_config.name)
# Create streamer (pass recorder reference for camera sharing)
streamer = CameraStreamer(camera_config=camera_config, device_info=device_info, state_manager=self.state_manager, event_system=self.event_system, recorder=recorder)
# Add streamer to the list
self.camera_streamers[camera_config.name] = streamer
# Update recorder's streamer reference if recorder already exists
recorder = self.camera_recorders.get(camera_config.name)
# Update recorder's streamer reference if recorder exists (bidirectional reference)
if recorder:
recorder.streamer = streamer
self.logger.debug(f"Updated streamer reference for recorder {camera_config.name}")
self.logger.debug(f"Updated bidirectional references: recorder <-> streamer for {camera_config.name}")
self.logger.info(f"Successfully created streamer for camera: {camera_config.name}")

View File

@@ -12,6 +12,7 @@ import logging
import cv2
import numpy as np
import contextlib
import queue
from typing import Optional, Dict, Any
from datetime import datetime
from pathlib import Path
@@ -627,13 +628,41 @@ class CameraRecorder:
use_streamer_frames: If True, read frames from streamer's frame queue instead of capturing directly
"""
try:
# Initialize video writer
if not self._initialize_video_writer():
# For streamer frames, we need to get a frame first to determine dimensions
initial_frame = None
if use_streamer_frames and self.streamer:
self.logger.info("Waiting for first frame from streamer to determine video dimensions...")
# Wait for first frame (with timeout)
timeout_start = time.time()
while initial_frame is None and time.time() - timeout_start < 5.0:
if self._stop_recording_event.is_set():
self.logger.error("Stop event set before getting initial frame")
return
if not self.streamer.streaming:
self.logger.error("Streamer stopped before getting initial frame")
return
try:
initial_frame = self.streamer._recording_frame_queue.get(timeout=0.5)
self.logger.info(f"Got initial frame from streamer: {initial_frame.shape if initial_frame is not None else 'None'}")
except Exception:
continue
if initial_frame is None:
self.logger.error("Failed to get initial frame from streamer for video writer initialization")
return
# Initialize video writer (with initial frame dimensions if using streamer frames)
if not self._initialize_video_writer(use_streamer_frames=use_streamer_frames, initial_frame=initial_frame):
self.logger.error("Failed to initialize video writer")
return
self.logger.info(f"Recording loop started (using {'streamer frames' if use_streamer_frames else 'direct capture'})")
# Write the initial frame if we got one from streamer
if initial_frame is not None and self.video_writer:
self.video_writer.write(initial_frame)
self.frame_count += 1
while not self._stop_recording_event.is_set():
try:
if use_streamer_frames and self.streamer:
@@ -672,6 +701,33 @@ class CameraRecorder:
if frame is not None and self.video_writer:
self.video_writer.write(frame)
self.frame_count += 1
# If streamer is active and using our shared camera, populate its queues
if self.streamer and self.streamer.streaming and self.streamer._using_shared_camera:
try:
# Populate streamer's MJPEG queue
try:
self.streamer._frame_queue.put_nowait(frame.copy())
except queue.Full:
try:
self.streamer._frame_queue.get_nowait()
self.streamer._frame_queue.put_nowait(frame.copy())
except queue.Empty:
pass
# Populate streamer's RTSP queue if RTSP is active
if self.streamer.rtsp_streaming:
try:
self.streamer._rtsp_frame_queue.put_nowait(frame.copy())
except queue.Full:
try:
self.streamer._rtsp_frame_queue.get_nowait()
self.streamer._rtsp_frame_queue.put_nowait(frame.copy())
except queue.Empty:
pass
except Exception as e:
# Non-critical error - logging is optional to avoid spam
pass
# Control frame rate (skip sleep if target_fps is 0 for maximum speed)
if self.camera_config.target_fps > 0:
@@ -698,17 +754,49 @@ class CameraRecorder:
# Note: Don't set self.recording = False here - let stop_recording() handle it
# to avoid race conditions where stop_recording thinks recording already stopped
def _initialize_video_writer(self) -> bool:
"""Initialize OpenCV video writer"""
def _initialize_video_writer(self, use_streamer_frames: bool = False, initial_frame: Optional[np.ndarray] = None) -> bool:
"""Initialize OpenCV video writer
Args:
use_streamer_frames: If True, using frames from streamer (camera handle may be None)
initial_frame: Optional initial frame to get dimensions from (used when use_streamer_frames=True)
"""
try:
# Get frame dimensions by capturing a test frame
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 1000)
mvsdk.CameraImageProcess(self.hCamera, pRawData, self.frame_buffer, FrameHead)
mvsdk.CameraReleaseImageBuffer(self.hCamera, pRawData)
# Get frame dimensions
if use_streamer_frames and initial_frame is not None:
# Get dimensions from initial frame
frame_height, frame_width = initial_frame.shape[:2]
frame_size = (frame_width, frame_height)
self.logger.info(f"Using frame dimensions from streamer frame: {frame_size}")
elif self.hCamera:
# Get frame dimensions by capturing a test frame from camera
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 1000)
mvsdk.CameraImageProcess(self.hCamera, pRawData, self.frame_buffer, FrameHead)
mvsdk.CameraReleaseImageBuffer(self.hCamera, pRawData)
frame_size = (FrameHead.iWidth, FrameHead.iHeight)
else:
# Fallback: try to get dimensions from streamer's camera if available
if self.streamer and self.streamer.hCamera:
try:
with suppress_camera_errors():
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.streamer.hCamera, 1000)
mvsdk.CameraReleaseImageBuffer(self.streamer.hCamera, pRawData)
frame_size = (FrameHead.iWidth, FrameHead.iHeight)
self.logger.info(f"Got frame dimensions from streamer's camera: {frame_size}")
except Exception as e:
self.logger.error(f"Failed to get frame dimensions from streamer camera: {e}")
# Use camera config defaults as last resort
camera_config = self.camera_config
frame_size = (camera_config.resolution_width or 1280, camera_config.resolution_height or 1024)
self.logger.warning(f"Using default frame size from config: {frame_size}")
else:
# Use camera config defaults as last resort
camera_config = self.camera_config
frame_size = (camera_config.resolution_width or 1280, camera_config.resolution_height or 1024)
self.logger.warning(f"Using default frame size from config: {frame_size}")
# Set up video writer with configured codec
fourcc = cv2.VideoWriter_fourcc(*self.camera_config.video_codec)
frame_size = (FrameHead.iWidth, FrameHead.iHeight)
# Use 30 FPS for video writer if target_fps is 0 (unlimited)
video_fps = self.camera_config.target_fps if self.camera_config.target_fps > 0 else 30.0
@@ -779,15 +867,60 @@ class CameraRecorder:
"""Clean up recording resources"""
try:
if self.video_writer:
# CRITICAL: Must release video writer properly to finalize MP4 file (write moov atom)
# OpenCV VideoWriter writes the moov atom only when release() is called
try:
# Ensure all frames are flushed
if hasattr(self.video_writer, 'flush'):
self.video_writer.flush()
except:
pass
# Release writer - this writes the moov atom and finalizes the file
self.video_writer.release()
self.video_writer = None
self.logger.debug("Video writer released")
self.logger.info(f"Video writer released and file closed (recorded {self.frame_count} frames)")
# Small delay to ensure file system sync
import time
time.sleep(0.1)
# Verify file exists and has content
if self.output_filename and os.path.exists(self.output_filename):
file_size = os.path.getsize(self.output_filename)
self.logger.info(f"Video file size: {file_size / (1024*1024):.2f} MB ({file_size} bytes)")
if file_size == 0:
self.logger.error("ERROR: Video file is empty (0 bytes)!")
elif file_size < 1024: # Less than 1KB is suspicious
self.logger.warning(f"WARNING: Video file is very small ({file_size} bytes) - may be corrupted")
else:
# Verify file has moov atom by checking if it's readable
try:
import subprocess
result = subprocess.run(
['ffprobe', '-v', 'error', '-show_format', self.output_filename],
capture_output=True,
timeout=5,
stderr=subprocess.PIPE
)
if result.returncode != 0:
stderr = result.stderr.decode('utf-8', errors='ignore')
if 'moov atom not found' in stderr:
self.logger.error("ERROR: Video file is missing moov atom (metadata) - file is corrupted/incomplete!")
else:
self.logger.warning(f"WARNING: ffprobe check failed: {stderr[:200]}")
else:
self.logger.info("Video file validated: moov atom present, file is readable")
except Exception as e:
self.logger.debug(f"Could not validate video file with ffprobe: {e}")
# Note: Don't set self.recording = False here - let stop_recording() control the flag
# to maintain proper state synchronization
except Exception as e:
self.logger.error(f"Error during recording cleanup: {e}")
import traceback
self.logger.error(f"Traceback: {traceback.format_exc()}")
def test_connection(self) -> bool:
"""Test camera connection"""

View File

@@ -55,19 +55,21 @@ def suppress_camera_errors():
class CameraStreamer:
"""Provides live preview streaming from cameras without blocking recording"""
def __init__(self, camera_config: CameraConfig, device_info: Any, state_manager: StateManager, event_system: EventSystem):
def __init__(self, camera_config: CameraConfig, device_info: Any, state_manager: StateManager, event_system: EventSystem, recorder=None):
self.camera_config = camera_config
self.device_info = device_info
self.state_manager = state_manager
self.event_system = event_system
self.recorder = recorder # Reference to CameraRecorder for camera sharing (reverse direction)
self.logger = logging.getLogger(f"{__name__}.{camera_config.name}")
# Camera handle and properties (separate from recorder)
# Camera handle and properties (separate from recorder, or shared with recorder)
self.hCamera: Optional[int] = None
self.cap = None
self.monoCamera = False
self.frame_buffer = None
self.frame_buffer_size = 0
self._using_shared_camera = False # Flag to indicate if we're sharing recorder's camera
# Streaming state
self.streaming = False
@@ -259,6 +261,21 @@ class CameraStreamer:
try:
self.logger.info(f"Initializing camera for streaming: {self.camera_config.name}")
# Check if recorder is active and has camera open - if so, share it
if self.recorder and self.recorder.hCamera and self.recorder.recording:
self.logger.info("Recorder is active with camera open - will share recorder's camera connection")
self.hCamera = self.recorder.hCamera
# Copy camera properties from recorder
self.cap = self.recorder.cap
self.monoCamera = self.recorder.monoCamera
self.frame_buffer = self.recorder.frame_buffer
self.frame_buffer_size = self.recorder.frame_buffer_size
self._using_shared_camera = True # Mark that we're using shared camera
# Camera is already started by recorder, so we don't need to call CameraPlay
# Also, we need to populate the frame queues from recorder's frames
self.logger.info("Using recorder's camera connection for streaming - will capture frames from recorder")
return True
# Ensure SDK is initialized
ensure_sdk_initialized()
@@ -342,11 +359,16 @@ class CameraStreamer:
def _streaming_loop(self):
"""Main streaming loop that captures frames continuously"""
self.logger.info("Starting streaming loop")
self.logger.info(f"Starting streaming loop (using {'shared camera from recorder' if self._using_shared_camera else 'own camera'})")
try:
while not self._stop_streaming_event.is_set():
try:
# If using shared camera, skip capture - recorder will populate queues
if self._using_shared_camera:
time.sleep(0.1) # Just wait, recorder populates queues
continue
# Capture frame with timeout
pRawData, FrameHead = mvsdk.CameraGetImageBuffer(self.hCamera, 200) # 200ms timeout
@@ -443,13 +465,21 @@ class CameraStreamer:
def _cleanup_camera(self):
"""Clean up camera resources"""
try:
if self.frame_buffer:
# Only cleanup frame buffer if we allocated it (not sharing with recorder)
if self.frame_buffer and not self._using_shared_camera:
mvsdk.CameraAlignFree(self.frame_buffer)
self.frame_buffer = None
if self.hCamera is not None:
# Only uninitialize camera if we own it (not sharing with recorder)
if self.hCamera is not None and not self._using_shared_camera:
mvsdk.CameraUnInit(self.hCamera)
self.hCamera = None
elif self._using_shared_camera:
# Just clear references, don't free shared resources
self.hCamera = None
self.cap = None
self.frame_buffer = None
self._using_shared_camera = False
self.logger.info("Camera resources cleaned up for streaming")

View File

@@ -243,6 +243,12 @@ def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0)
Transcode video to H.264 on-the-fly using FFmpeg.
Streams H.264/MP4 that browsers can actually play.
"""
if not file_path.exists():
raise HTTPException(status_code=404, detail="Video file not found")
if file_path.stat().st_size == 0:
raise HTTPException(status_code=500, detail="Video file is empty (0 bytes)")
# FFmpeg command to transcode to H.264 with web-optimized settings
cmd = [
"ffmpeg",
@@ -272,17 +278,27 @@ def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0)
# Stream chunks
chunk_size = 8192
bytes_yielded = 0
while True:
chunk = process.stdout.read(chunk_size)
if not chunk:
break
bytes_yielded += len(chunk)
yield chunk
# Check for errors
process.wait()
if process.returncode != 0:
stderr = process.stderr.read().decode('utf-8', errors='ignore')
print(f"FFmpeg error (code {process.returncode}): {stderr}")
if bytes_yielded == 0:
raise HTTPException(status_code=500, detail=f"FFmpeg transcoding failed: {stderr[:200]}")
except HTTPException:
raise
except Exception as e:
print(f"FFmpeg transcoding error: {e}")
raise
raise HTTPException(status_code=500, detail=f"Transcoding error: {str(e)}")
@app.head("/videos/{file_id:path}/stream-transcoded")

View File

@@ -91,8 +91,8 @@ export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
<p className="text-xs text-gray-500 dark:text-gray-400 mt-0.5">Watch your recording</p>
</div>
<a
href={src}
download
href={`${BASE}/videos/stream?file_id=${encodeURIComponent(fileId)}`}
download={fileId.split('/').pop() || 'video.mp4'}
className="px-3 py-1.5 text-xs bg-blue-600 hover:bg-blue-700 text-white rounded-md transition-colors"
onClick={(e) => e.stopPropagation()}
>