Enhance media API transcoding and video streaming capabilities
- Added support for limiting concurrent transcoding operations in the media API to prevent resource exhaustion.
- Implemented functions to retrieve video duration and bitrate using ffprobe for improved streaming performance.
- Enhanced the generate_transcoded_stream function to handle HTTP range requests, allowing for more efficient video playback.
- Updated VideoModal component to disable fluid and responsive modes, ensuring proper container boundaries during video playback.
- Improved logging throughout the transcoding process for better error tracking and debugging.
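For orientation, here is a minimal client-side sketch (not part of this commit) of how the new transcoded streaming endpoint is expected to behave once this change is deployed. It assumes the media API is reachable on port 8090 as in the compose file below; the file_id and output filename are hypothetical.

    import requests

    BASE = "http://localhost:8090"                        # port from the compose file
    file_id = "cam1/cam1_auto_mill_20240101_120000.mp4"   # hypothetical recording
    url = f"{BASE}/videos/{file_id}/stream-transcoded"

    # Seek via an HTTP Range request; the server maps the byte offset to a time
    # offset and transcodes from there.
    resp = requests.get(url, headers={"Range": "bytes=50000000-"}, stream=True, timeout=30)

    if resp.status_code == 503:
        # All MAX_CONCURRENT_TRANSCODING slots are busy; back off and retry.
        print("Transcoder busy:", resp.json().get("detail"))
    elif resp.status_code == 206:
        # Partial Content: fragmented MP4 streamed from the mapped time offset.
        with open("clip.mp4", "wb") as out:
            for chunk in resp.iter_content(chunk_size=8192):
                out.write(chunk)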
@@ -251,3 +251,4 @@ Session completed with focus on MQTT debugging and enhanced logging for troubles
@@ -162,3 +162,4 @@ if __name__ == "__main__":
@@ -12,7 +12,7 @@ import paho.mqtt.client as mqtt
 
 from ..core.config import Config, MQTTConfig
 from ..core.state_manager import StateManager
-from ..core.events import EventSystem, EventType, publish_machine_state_changed
+from ..core.events import EventSystem, EventType
 from .handlers import MQTTMessageHandler
 
 
@@ -9,7 +9,7 @@ from typing import Dict, Optional
 from datetime import datetime
 
 from ..core.state_manager import StateManager, MachineState
-from ..core.events import EventSystem, publish_machine_state_changed
+from ..core.events import EventSystem, EventType
 
 
 class MQTTMessageHandler:
@@ -47,7 +47,16 @@ class MQTTMessageHandler:
         if state_changed:
             self.logger.info(f"📡 MQTT: Machine {machine_name} state changed to: {normalized_payload}")
             self.logger.info(f"📡 Publishing MACHINE_STATE_CHANGED event for {machine_name} -> {normalized_payload}")
-            publish_machine_state_changed(machine_name=machine_name, state=normalized_payload, source="mqtt_handler")
+            # Use the event_system instance passed to this handler, not the global one
+            self.event_system.publish(
+                EventType.MACHINE_STATE_CHANGED,
+                "mqtt_handler",
+                {
+                    "machine_name": machine_name,
+                    "state": normalized_payload,
+                    "previous_state": None
+                }
+            )
             self.logger.info(f"✅ Published MACHINE_STATE_CHANGED event for {machine_name} -> {normalized_payload}")
         else:
             self.logger.info(f"📡 Machine {machine_name} state unchanged (still {normalized_payload}) - no event published")
@@ -103,7 +103,15 @@ class StandaloneAutoRecorder:
                 machine_name = camera_config.machine_topic
                 if machine_name:
                     mapping[machine_name] = camera_config.name
-                    self.logger.info(f"Auto-recording enabled: {machine_name} -> {camera_config.name}")
+                    self.logger.info(f"✅ Auto-recording enabled: '{machine_name}' -> {camera_config.name}")
+                else:
+                    self.logger.warning(f"⚠️ Camera {camera_config.name} has no machine_topic configured")
+            else:
+                if not camera_config.enabled:
+                    self.logger.debug(f"Camera {camera_config.name} is disabled")
+                elif not camera_config.auto_start_recording_enabled:
+                    self.logger.debug(f"Camera {camera_config.name} has auto-recording disabled")
+        self.logger.info(f"📋 Built machine-camera mapping with {len(mapping)} entries: {mapping}")
         return mapping
 
     def _setup_mqtt(self) -> bool:
@@ -197,7 +205,8 @@ class StandaloneAutoRecorder:
         # Check if we have a camera for this machine
         camera_name = self.machine_camera_map.get(machine_name)
         if not camera_name:
-            self.logger.debug(f"No camera mapped to machine: {machine_name}")
+            self.logger.warning(f"❌ AUTO-RECORDER: No camera mapped to machine: '{machine_name}'")
+            self.logger.info(f"📋 Available machine-camera mappings: {self.machine_camera_map}")
             return
 
         self.logger.info(f"📡 MQTT: Machine {machine_name} ({camera_name}) -> {state}")
@@ -215,10 +224,12 @@ class StandaloneAutoRecorder:
     def _start_recording(self, camera_name: str, machine_name: str):
         """Start recording for a camera"""
         try:
+            self.logger.info(f"🎬 AUTO-RECORDER: Attempting to start recording for {camera_name} (machine: {machine_name})")
+
             # Check if already recording
             camera_info = self.state_manager.get_camera_status(camera_name) if self.state_manager else None
             if camera_info and camera_info.is_recording:
-                self.logger.info(f"Camera {camera_name} is already recording, skipping")
+                self.logger.info(f"⚠️ Camera {camera_name} is already recording, skipping auto-start")
                 return
 
             # Use camera_manager if available, otherwise use standalone recorder
@@ -227,8 +238,15 @@ class StandaloneAutoRecorder:
                 from ..core.timezone_utils import format_filename_timestamp
                 timestamp = format_filename_timestamp()
                 camera_config = self.config.get_camera_by_name(camera_name)
+
+                if not camera_config:
+                    self.logger.error(f"❌ AUTO-RECORDER: No configuration found for camera {camera_name}")
+                    return
+
                 video_format = camera_config.video_format if camera_config else "mp4"
                 filename = f"{camera_name}_auto_{machine_name}_{timestamp}.{video_format}"
+
+                self.logger.info(f"📹 AUTO-RECORDER: Starting recording with settings - Exposure: {camera_config.exposure_ms}ms, Gain: {camera_config.gain}, FPS: {camera_config.target_fps}")
 
                 # Use camera manager to start recording with camera's default settings
                 success = self.camera_manager.manual_start_recording(
@@ -240,10 +258,10 @@ class StandaloneAutoRecorder:
                 )
 
                 if success:
-                    self.logger.info(f"✅ Started auto-recording: {camera_name} -> {filename}")
+                    self.logger.info(f"✅ AUTO-RECORDER: Successfully started auto-recording: {camera_name} -> {filename}")
                     self.active_recordings[camera_name] = filename
                 else:
-                    self.logger.error(f"❌ Failed to start auto-recording for camera {camera_name}")
+                    self.logger.error(f"❌ AUTO-RECORDER: Failed to start auto-recording for camera {camera_name} (manual_start_recording returned False)")
             else:
                 # Standalone mode - use own recorder
                 recorder = self._get_camera_recorder(camera_name)
@@ -131,10 +131,23 @@ services:
     environment:
       - MEDIA_VIDEOS_DIR=/mnt/nfs_share
      - MEDIA_THUMBS_DIR=/mnt/nfs_share/.thumbnails
+      - MAX_CONCURRENT_TRANSCODING=2 # Limit concurrent transcoding operations
     volumes:
       - /mnt/nfs_share:/mnt/nfs_share
     ports:
       - "8090:8090"
+    deploy:
+      resources:
+        limits:
+          cpus: '4' # Limit to 4 CPU cores (adjust based on your system)
+          memory: 2G # Limit to 2GB RAM per container
+        reservations:
+          cpus: '1' # Reserve at least 1 CPU core
+          memory: 512M # Reserve at least 512MB RAM
+    # Alternative syntax for older Docker Compose versions:
+    # cpus: '4'
+    # mem_limit: 2g
+    # mem_reservation: 512m
 
   mediamtx:
     image: bluenviron/mediamtx:latest
@@ -84,3 +84,4 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;
@@ -116,3 +116,4 @@ CREATE POLICY "User roles are deletable by authenticated users" ON public.user_r
@@ -60,3 +60,4 @@ CREATE POLICY "Machine types are deletable by authenticated users" ON public.mac
@@ -74,3 +74,4 @@ CREATE POLICY "Experiment phases are deletable by authenticated users" ON public
@@ -65,3 +65,4 @@ CREATE POLICY "Experiment repetitions are deletable by authenticated users" ON p
@@ -86,3 +86,4 @@ CREATE POLICY "Meyer Cracker parameters are deletable by authenticated users" ON
@@ -190,3 +190,4 @@ CREATE POLICY "conductor_availability_delete_policy" ON public.conductor_availab
@@ -1,8 +1,10 @@
 import os
 import pathlib
 import subprocess
+import threading
+import time
 import urllib.parse
-from typing import List, Optional
+from typing import List, Optional, Tuple
 
 from fastapi import FastAPI, HTTPException, Response, Request
 from fastapi.middleware.cors import CORSMiddleware
@@ -11,6 +13,11 @@ from fastapi.responses import FileResponse, StreamingResponse
 MEDIA_DIR = pathlib.Path(os.getenv("MEDIA_VIDEOS_DIR", "/mnt/videos")).resolve()
 THUMBS_DIR = pathlib.Path(os.getenv("MEDIA_THUMBS_DIR", MEDIA_DIR / ".thumbnails")).resolve()
 
+# Limit concurrent transcoding operations to prevent resource exhaustion
+# Adjust based on your CPU cores and available memory
+MAX_CONCURRENT_TRANSCODING = int(os.getenv("MAX_CONCURRENT_TRANSCODING", "2"))
+transcoding_semaphore = threading.Semaphore(MAX_CONCURRENT_TRANSCODING)
+
 app = FastAPI(title="Media API", version="0.1.0")
 
 # CORS for dashboard at exp-dash:8080 (and localhost for convenience)
@@ -238,10 +245,45 @@ def stream_options():
     )
 
 
-def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0):
+def get_video_info(file_path: pathlib.Path) -> Tuple[float, Optional[int]]:
+    """Get video duration and bitrate using ffprobe"""
+    try:
+        # Get duration
+        cmd_duration = [
+            "ffprobe",
+            "-v", "error",
+            "-show_entries", "format=duration",
+            "-of", "default=noprint_wrappers=1:nokey=1",
+            str(file_path)
+        ]
+        result_duration = subprocess.run(cmd_duration, capture_output=True, text=True, check=True)
+        duration = float(result_duration.stdout.strip())
+
+        # Get bitrate
+        cmd_bitrate = [
+            "ffprobe",
+            "-v", "error",
+            "-show_entries", "format=bit_rate",
+            "-of", "default=noprint_wrappers=1:nokey=1",
+            str(file_path)
+        ]
+        result_bitrate = subprocess.run(cmd_bitrate, capture_output=True, text=True, check=True)
+        bitrate_str = result_bitrate.stdout.strip()
+        bitrate = int(bitrate_str) if bitrate_str and bitrate_str.isdigit() else None
+
+        return duration, bitrate
+    except (subprocess.CalledProcessError, ValueError):
+        # Fallback: estimate from file size (very rough estimate)
+        file_size_mb = file_path.stat().st_size / (1024 * 1024)
+        duration = max(10.0, file_size_mb * 20) # Rough estimate: 20 seconds per MB
+        return duration, None
+
+
+def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0, duration: Optional[float] = None):
     """
     Transcode video to H.264 on-the-fly using FFmpeg.
     Streams H.264/MP4 that browsers can actually play.
+    Uses semaphore to limit concurrent transcoding operations.
     """
     if not file_path.exists():
         raise HTTPException(status_code=404, detail="Video file not found")
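As a quick illustration (not part of the commit), a hypothetical call to the new helper and what the fallback estimate looks like when ffprobe fails; the path and numbers below are made up:

    import pathlib

    duration, bitrate = get_video_info(pathlib.Path("/mnt/videos/cam1/run_001.avi"))
    # e.g. duration -> 614.3 seconds, bitrate -> 8000000 bits/s
    # (or bitrate -> None if ffprobe cannot report one for the container)

    # Fallback path: for a 120 MB file the rough estimate is
    # max(10.0, 120 * 20) = 2400.0 seconds, deliberately coarse.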
@@ -249,56 +291,176 @@ def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0)
     if file_path.stat().st_size == 0:
         raise HTTPException(status_code=500, detail="Video file is empty (0 bytes)")
 
-    # FFmpeg command to transcode to H.264 with web-optimized settings
-    cmd = [
-        "ffmpeg",
-        "-i", str(file_path),
-        "-c:v", "libx264", # H.264 codec
-        "-preset", "ultrafast", # Fast encoding for real-time
-        "-tune", "zerolatency", # Low latency
-        "-crf", "23", # Quality (18-28, lower = better)
-        "-c:a", "aac", # AAC audio if present
-        "-movflags", "+faststart", # Web-optimized (moov atom at beginning)
-        "-f", "mp4", # MP4 container
-        "-" # Output to stdout
-    ]
-
-    # If seeking to specific time
-    if start_time > 0:
-        cmd.insert(-2, "-ss")
-        cmd.insert(-2, str(start_time))
-
-    try:
-        process = subprocess.Popen(
-            cmd,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            bufsize=8192
-        )
-
-        # Stream chunks
-        chunk_size = 8192
-        bytes_yielded = 0
-        while True:
-            chunk = process.stdout.read(chunk_size)
-            if not chunk:
-                break
-            bytes_yielded += len(chunk)
-            yield chunk
-
-        # Check for errors
-        process.wait()
-        if process.returncode != 0:
-            stderr = process.stderr.read().decode('utf-8', errors='ignore')
-            print(f"FFmpeg error (code {process.returncode}): {stderr}")
-            if bytes_yielded == 0:
-                raise HTTPException(status_code=500, detail=f"FFmpeg transcoding failed: {stderr[:200]}")
-
-    except HTTPException:
-        raise
-    except Exception as e:
-        print(f"FFmpeg transcoding error: {e}")
-        raise HTTPException(status_code=500, detail=f"Transcoding error: {str(e)}")
+    # Acquire semaphore to limit concurrent transcoding
+    # This prevents resource exhaustion from too many simultaneous FFmpeg processes
+    semaphore_acquired = False
+    try:
+        if not transcoding_semaphore.acquire(blocking=False):
+            raise HTTPException(
+                status_code=503,
+                detail=f"Server busy: Maximum concurrent transcoding operations ({MAX_CONCURRENT_TRANSCODING}) reached. Please try again in a moment."
+            )
+        semaphore_acquired = True
+
+        # FFmpeg command to transcode to H.264 with web-optimized settings
+        # Use fragmented MP4 for HTTP streaming (doesn't require seekable output)
+        # frag_keyframe: fragment at keyframes
+        # dash: use DASH-compatible fragmentation
+        # omit_tfhd_offset: avoid seeking by omitting track fragment header offset
+        # Optimized for resource usage: ultrafast preset, limited threads
+        # Build command with proper order: input seeking first, then input, then filters/codecs
+        cmd = ["ffmpeg"]
+
+        # If seeking to specific time, use input seeking (before -i, more accurate)
+        if start_time > 0:
+            cmd.extend(["-ss", str(start_time)])
+
+        # Input file
+        cmd.extend(["-i", str(file_path)])
+
+        # Video codec settings
+        cmd.extend([
+            "-c:v", "libx264", # H.264 codec
+            "-preset", "ultrafast", # Fast encoding for real-time (lowest CPU usage)
+            "-tune", "zerolatency", # Low latency
+            "-crf", "23", # Quality (18-28, lower = better)
+            "-threads", "2", # Limit threads to reduce CPU usage (adjust based on CPU cores)
+            "-max_muxing_queue_size", "1024", # Prevent buffer overflow
+        ])
+
+        # If duration is specified (for range requests), limit output duration
+        if duration is not None:
+            cmd.extend(["-t", str(duration)])
+
+        # Audio codec settings
+        cmd.extend([
+            "-c:a", "aac", # AAC audio if present
+            "-b:a", "128k", # Limit audio bitrate to save resources
+        ])
+
+        # Output format settings
+        cmd.extend([
+            "-movflags", "frag_keyframe+dash+omit_tfhd_offset", # Fragmented MP4 optimized for HTTP streaming
+            "-f", "mp4", # MP4 container
+            "-" # Output to stdout
+        ])
+
+        process = None
+        stderr_thread = None
+        try:
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                bufsize=0 # Unbuffered for better streaming
+            )
+
+            # Stream chunks
+            chunk_size = 8192
+            bytes_yielded = 0
+            stderr_data = []
+
+            # Read stderr in background (to avoid blocking)
+            def read_stderr():
+                while True:
+                    chunk = process.stderr.read(1024)
+                    if not chunk:
+                        break
+                    stderr_data.append(chunk)
+
+            stderr_thread = threading.Thread(target=read_stderr, daemon=True)
+            stderr_thread.start()
+
+            try:
+                while True:
+                    chunk = process.stdout.read(chunk_size)
+                    if not chunk:
+                        break
+                    bytes_yielded += len(chunk)
+                    yield chunk
+            except GeneratorExit:
+                # Generator was closed/stopped - cleanup process
+                if process and process.poll() is None:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                raise
+            except Exception as e:
+                # Error during streaming - cleanup and re-raise
+                if process and process.poll() is None:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                raise
+
+            # Wait for process to finish and stderr thread to complete
+            process.wait()
+            if stderr_thread:
+                stderr_thread.join(timeout=1)
+
+            # Check for errors
+            if process.returncode != 0:
+                stderr = b''.join(stderr_data).decode('utf-8', errors='ignore')
+                # Extract actual error message (skip version banner)
+                error_lines = stderr.split('\n')
+                # Skip version/configuration lines and get actual error
+                error_msg = '\n'.join([line for line in error_lines if line and
+                                       not line.startswith('ffmpeg version') and
+                                       not line.startswith('built with') and
+                                       not line.startswith('configuration:') and
+                                       not line.startswith('libav') and
+                                       'Copyright' not in line])
+
+                # If no meaningful error found, use last few lines
+                if not error_msg.strip():
+                    error_msg = '\n'.join(error_lines[-10:])
+
+                print(f"FFmpeg error (code {process.returncode}): Full stderr:\n{stderr}")
+                print(f"FFmpeg command was: {' '.join(cmd)}")
+
+                if bytes_yielded == 0:
+                    # Show first 500 chars of actual error (not just version info)
+                    error_detail = error_msg[:500] if error_msg else stderr[:500]
+                    raise HTTPException(status_code=500, detail=f"FFmpeg transcoding failed: {error_detail}")
+
+        except HTTPException:
+            raise
+        except Exception as e:
+            # Ensure process is cleaned up on any error
+            if process and process.poll() is None:
+                try:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                except Exception:
+                    try:
+                        process.kill()
+                    except Exception:
+                        pass
+            print(f"FFmpeg transcoding error: {e}")
+            raise HTTPException(status_code=500, detail=f"Transcoding error: {str(e)}")
+    finally:
+        # Always release semaphore when done (success or error)
+        # Only release if we actually acquired it
+        if semaphore_acquired:
+            try:
+                transcoding_semaphore.release()
+            except Exception as e:
+                print(f"Error releasing semaphore: {e}")
 
 
 @app.head("/videos/{file_id:path}/stream-transcoded")
@@ -309,10 +471,14 @@ def stream_transcoded(request: Request, file_id: str, start_time: float = 0.0):
     """
     Stream video transcoded to H.264 on-the-fly.
     This endpoint converts MPEG-4 Part 2 videos to H.264 for browser compatibility.
+    Supports seeking via HTTP Range requests or start_time parameter.
     """
     p = path_from_file_id(file_id)
     content_type = "video/mp4"
+
+    # Get video duration and bitrate for range request handling
+    video_duration, original_bitrate = get_video_info(p)
 
     # Base headers
     headers = {
         "Content-Type": content_type,
@@ -321,14 +487,69 @@ def stream_transcoded(request: Request, file_id: str, start_time: float = 0.0):
         "Access-Control-Allow-Methods": "GET, HEAD, OPTIONS",
         "Access-Control-Allow-Headers": "Range, Content-Type",
         "Access-Control-Expose-Headers": "Content-Range, Accept-Ranges, Content-Length",
+        "Accept-Ranges": "bytes",
     }
 
-    # For HEAD requests, just return headers (we can't know size without transcoding)
+    # For HEAD requests, return headers (estimate size)
     if request.method == "HEAD":
+        # Rough estimate: ~2-3 MB per minute of video
+        estimated_size = int(video_duration * 50000) # ~50KB per second estimate
+        headers["Content-Length"] = str(estimated_size)
         return Response(status_code=200, headers=headers)
 
-    # Note: Range requests are complex with transcoding, so we'll transcode from start
-    # For better performance with range requests, we'd need to cache transcoded segments
+    # Handle Range requests for seeking
+    range_header = request.headers.get("range")
+
+    if range_header:
+        # Parse range request: bytes=START-END
+        range_value = range_header.strip().lower().replace("bytes=", "")
+        start_str, _, end_str = range_value.partition("-")
+
+        try:
+            byte_start = int(start_str) if start_str else 0
+            byte_end = int(end_str) if end_str else None
+        except ValueError:
+            # Invalid range, ignore and stream from start
+            range_header = None
+
+    if range_header:
+        # For seeking, convert byte range to time-based seeking
+        # Estimate transcoded bitrate (H.264 is typically more efficient than original)
+        # Use original bitrate if available, otherwise estimate
+        if original_bitrate:
+            # H.264 transcoding typically uses 70-80% of original bitrate at same quality
+            transcoded_bitrate = int(original_bitrate * 0.75)
+        else:
+            # Default estimate: 2 Mbps
+            transcoded_bitrate = 2000000
+
+        estimated_total_bytes = int(video_duration * transcoded_bitrate / 8)
+
+        if estimated_total_bytes > 0 and byte_start < estimated_total_bytes:
+            # Calculate time position from byte offset
+            time_start_sec = (byte_start / estimated_total_bytes) * video_duration
+            time_start_sec = max(0.0, min(time_start_sec, video_duration - 0.5))
+
+            # For seeking, don't limit duration - stream to end
+            # The browser will handle buffering
+            duration_sec = None # None means stream to end
+
+            # Update headers for range response
+            # For seeking, we typically don't know the exact end, so estimate
+            actual_byte_end = min(byte_end or estimated_total_bytes - 1, estimated_total_bytes - 1)
+            headers["Content-Range"] = f"bytes {byte_start}-{actual_byte_end}/{estimated_total_bytes}"
+            headers["Content-Length"] = str(actual_byte_end - byte_start + 1)
+
+            # Stream from the calculated time position using FFmpeg's -ss flag
+            # Duration is None, so it will stream to the end
+            return StreamingResponse(
+                generate_transcoded_stream(p, time_start_sec, duration_sec),
+                media_type=content_type,
+                headers=headers,
+                status_code=206 # Partial Content
+            )
+
+    # No range request or invalid range - stream from start_time
     return StreamingResponse(
         generate_transcoded_stream(p, start_time),
         media_type=content_type,
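A worked example (illustrative numbers only, not from the commit) of the byte-offset-to-time mapping used above when a Range request arrives:

    video_duration = 600.0        # seconds, as returned by get_video_info()
    original_bitrate = 4_000_000  # 4 Mbps reported by ffprobe
    transcoded_bitrate = int(original_bitrate * 0.75)                     # 3_000_000
    estimated_total_bytes = int(video_duration * transcoded_bitrate / 8)  # 225_000_000

    byte_start = 112_500_000      # client asked for roughly the middle
    time_start_sec = (byte_start / estimated_total_bytes) * video_duration  # 300.0 s
    # FFmpeg is then started with "-ss 300.0" and the response is returned as 206.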
@@ -23,8 +23,8 @@ export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
       controls: true,
       autoplay: true,
       preload: 'auto',
-      fluid: true,
-      responsive: true,
+      fluid: false, // Disable fluid mode to respect container boundaries
+      responsive: false, // Disable responsive mode to prevent overflow
       playbackRates: [0.5, 1, 1.25, 1.5, 2],
       sources: [
         {
@@ -65,53 +65,75 @@ export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
   if (!fileId || !src) return null
 
   return (
-    <div
-      className="fixed inset-0 z-[1000] bg-black/60 dark:bg-black/80 backdrop-blur-sm flex items-center justify-center p-4"
-      onClick={onClose}
-    >
-      <div
-        className="bg-white dark:bg-gray-800 rounded-lg shadow-2xl w-full max-w-4xl overflow-hidden transform transition-all relative"
-        onClick={(e: React.MouseEvent) => e.stopPropagation()}
-      >
-        {/* Close button - positioned absolutely in top right corner */}
-        <button
-          onClick={onClose}
-          className="absolute top-3 right-3 z-10 inline-flex items-center justify-center w-10 h-10 rounded-lg bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-300 border border-gray-300 dark:border-gray-600 shadow-md hover:bg-red-50 dark:hover:bg-red-900/20 hover:text-red-600 dark:hover:text-red-400 hover:border-red-300 dark:hover:border-red-700 transition-all duration-200"
-          aria-label="Close modal"
-        >
-          <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" strokeWidth={2.5}>
-            <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
-          </svg>
-        </button>
-
-        <div className="p-4 bg-gray-50 dark:bg-gray-900 border-b border-gray-200 dark:border-gray-700">
-          <div className="flex items-center justify-between">
-            <div>
-              <h3 className="text-lg font-semibold text-gray-900 dark:text-white">Video Player</h3>
-              <p className="text-xs text-gray-500 dark:text-gray-400 mt-0.5">Watch your recording</p>
-            </div>
-            <a
-              href={`${BASE}/videos/stream?file_id=${encodeURIComponent(fileId)}`}
-              download={fileId.split('/').pop() || 'video.mp4'}
-              className="px-3 py-1.5 text-xs bg-blue-600 hover:bg-blue-700 text-white rounded-md transition-colors"
-              onClick={(e) => e.stopPropagation()}
-            >
-              Download Video
-            </a>
-          </div>
-        </div>
-        <div className="p-4 bg-black">
-          <div className="relative w-full" style={{ aspectRatio: '16/9', maxHeight: '70vh' }}>
-            <video
-              ref={videoRef}
-              className="video-js vjs-default-skin w-full h-full"
-              playsInline
-              key={fileId}
-            />
-          </div>
-        </div>
-      </div>
-    </div>
+    <>
+      <style>{`
+        .video-modal-container .video-js {
+          width: 100% !important;
+          height: 100% !important;
+          max-width: 100% !important;
+          max-height: 100% !important;
+        }
+        .video-modal-container .video-js .vjs-tech {
+          width: 100% !important;
+          height: 100% !important;
+          object-fit: contain;
+        }
+        .video-modal-container .video-js .vjs-control-bar {
+          position: absolute !important;
+          bottom: 0 !important;
+          left: 0 !important;
+          right: 0 !important;
+          width: 100% !important;
+        }
+      `}</style>
+      <div
+        className="fixed inset-0 z-[1000] bg-black/60 dark:bg-black/80 backdrop-blur-sm flex items-center justify-center p-4"
+        onClick={onClose}
+      >
+        <div
+          className="video-modal-container bg-white dark:bg-gray-800 rounded-lg shadow-2xl w-full max-w-4xl max-h-[90vh] overflow-hidden transform transition-all relative flex flex-col"
+          onClick={(e: React.MouseEvent) => e.stopPropagation()}
+        >
+          {/* Close button - positioned absolutely in top right corner */}
+          <button
+            onClick={onClose}
+            className="absolute top-3 right-3 z-10 inline-flex items-center justify-center w-10 h-10 rounded-lg bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-300 border border-gray-300 dark:border-gray-600 shadow-md hover:bg-red-50 dark:hover:bg-red-900/20 hover:text-red-600 dark:hover:text-red-400 hover:border-red-300 dark:hover:border-red-700 transition-all duration-200"
            aria-label="Close modal"
+          >
+            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" strokeWidth={2.5}>
+              <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
+            </svg>
+          </button>
+
+          <div className="p-4 bg-gray-50 dark:bg-gray-900 border-b border-gray-200 dark:border-gray-700">
+            <div className="flex items-center justify-between">
+              <div>
+                <h3 className="text-lg font-semibold text-gray-900 dark:text-white">Video Player</h3>
+                <p className="text-xs text-gray-500 dark:text-gray-400 mt-0.5">Watch your recording</p>
+              </div>
+              <a
+                href={`${BASE}/videos/stream?file_id=${encodeURIComponent(fileId)}`}
+                download={fileId.split('/').pop() || 'video.mp4'}
+                className="px-3 py-1.5 text-xs bg-blue-600 hover:bg-blue-700 text-white rounded-md transition-colors"
+                onClick={(e) => e.stopPropagation()}
+              >
+                Download Video
+              </a>
+            </div>
+          </div>
+          <div className="flex-1 min-h-0 bg-black p-4 flex items-center justify-center">
+            <div className="relative w-full max-w-full max-h-full" style={{ aspectRatio: '16/9', maxHeight: 'calc(90vh - 200px)' }}>
+              <video
+                ref={videoRef}
+                className="video-js vjs-default-skin w-full h-full"
+                playsInline
+                key={fileId}
+              />
+            </div>
+          </div>
+        </div>
+      </div>
+    </>
   )
 }