Enhance media API transcoding and video streaming capabilities

- Added support for limiting concurrent transcoding operations in the media API to prevent resource exhaustion.
- Implemented functions to retrieve video duration and bitrate via ffprobe; these feed the byte-offset estimates used when serving range requests.
- Enhanced the transcoded streaming endpoint to handle HTTP Range requests, enabling seeking during playback (a hypothetical client call is sketched after this list).
- Updated VideoModal component to disable fluid and responsive modes, ensuring proper container boundaries during video playback.
- Improved logging throughout the transcoding process for better error tracking and debugging.
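
For reference, a minimal client-side sketch of the new seeking behavior. The endpoint path and port come from the diffs below; the host and file id are illustrative, and `requests` is just one convenient HTTP client:

    import requests

    # Hypothetical file id under MEDIA_VIDEOS_DIR; adjust host/port to your deployment.
    url = "http://localhost:8090/videos/camera1/clip.mp4/stream-transcoded"

    # A byte range makes the endpoint translate the byte offset into a time
    # offset before transcoding (see the media API hunks below).
    resp = requests.get(url, headers={"Range": "bytes=1000000-"}, stream=True)
    print(resp.status_code)  # 206 Partial Content when the range is honored
    for chunk in resp.iter_content(chunk_size=8192):
        ...  # feed a player or write to disk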
Author: salirezav
Date: 2025-11-04 11:55:27 -05:00
Parent: de46753f15
Commit: 5070d9b2ca
15 changed files with 391 additions and 99 deletions


@@ -251,3 +251,4 @@ Session completed with focus on MQTT debugging and enhanced logging for troubles


@@ -162,3 +162,4 @@ if __name__ == "__main__":


@@ -12,7 +12,7 @@ import paho.mqtt.client as mqtt
 from ..core.config import Config, MQTTConfig
 from ..core.state_manager import StateManager
-from ..core.events import EventSystem, EventType, publish_machine_state_changed
+from ..core.events import EventSystem, EventType
 from .handlers import MQTTMessageHandler


@@ -9,7 +9,7 @@ from typing import Dict, Optional
 from datetime import datetime
 from ..core.state_manager import StateManager, MachineState
-from ..core.events import EventSystem, publish_machine_state_changed
+from ..core.events import EventSystem, EventType

 class MQTTMessageHandler:
@@ -47,7 +47,16 @@ class MQTTMessageHandler:
         if state_changed:
             self.logger.info(f"📡 MQTT: Machine {machine_name} state changed to: {normalized_payload}")
             self.logger.info(f"📡 Publishing MACHINE_STATE_CHANGED event for {machine_name} -> {normalized_payload}")
-            publish_machine_state_changed(machine_name=machine_name, state=normalized_payload, source="mqtt_handler")
+            # Use the event_system instance passed to this handler, not the global one
+            self.event_system.publish(
+                EventType.MACHINE_STATE_CHANGED,
+                "mqtt_handler",
+                {
+                    "machine_name": machine_name,
+                    "state": normalized_payload,
+                    "previous_state": None
+                }
+            )
             self.logger.info(f"✅ Published MACHINE_STATE_CHANGED event for {machine_name} -> {normalized_payload}")
         else:
             self.logger.info(f"📡 Machine {machine_name} state unchanged (still {normalized_payload}) - no event published")


@@ -103,7 +103,15 @@ class StandaloneAutoRecorder:
                 machine_name = camera_config.machine_topic
                 if machine_name:
                     mapping[machine_name] = camera_config.name
-                    self.logger.info(f"Auto-recording enabled: {machine_name} -> {camera_config.name}")
+                    self.logger.info(f"Auto-recording enabled: '{machine_name}' -> {camera_config.name}")
+                else:
+                    self.logger.warning(f"⚠️ Camera {camera_config.name} has no machine_topic configured")
+            else:
+                if not camera_config.enabled:
+                    self.logger.debug(f"Camera {camera_config.name} is disabled")
+                elif not camera_config.auto_start_recording_enabled:
+                    self.logger.debug(f"Camera {camera_config.name} has auto-recording disabled")
+        self.logger.info(f"📋 Built machine-camera mapping with {len(mapping)} entries: {mapping}")
         return mapping

     def _setup_mqtt(self) -> bool:
@@ -197,7 +205,8 @@ class StandaloneAutoRecorder:
         # Check if we have a camera for this machine
         camera_name = self.machine_camera_map.get(machine_name)
         if not camera_name:
-            self.logger.debug(f"No camera mapped to machine: {machine_name}")
+            self.logger.warning(f"❌ AUTO-RECORDER: No camera mapped to machine: '{machine_name}'")
+            self.logger.info(f"📋 Available machine-camera mappings: {self.machine_camera_map}")
             return

         self.logger.info(f"📡 MQTT: Machine {machine_name} ({camera_name}) -> {state}")
@@ -215,10 +224,12 @@ class StandaloneAutoRecorder:
     def _start_recording(self, camera_name: str, machine_name: str):
         """Start recording for a camera"""
         try:
+            self.logger.info(f"🎬 AUTO-RECORDER: Attempting to start recording for {camera_name} (machine: {machine_name})")
             # Check if already recording
             camera_info = self.state_manager.get_camera_status(camera_name) if self.state_manager else None
             if camera_info and camera_info.is_recording:
-                self.logger.info(f"Camera {camera_name} is already recording, skipping")
+                self.logger.info(f"⚠️ Camera {camera_name} is already recording, skipping auto-start")
                 return

             # Use camera_manager if available, otherwise use standalone recorder
@@ -227,8 +238,15 @@ class StandaloneAutoRecorder:
             from ..core.timezone_utils import format_filename_timestamp
             timestamp = format_filename_timestamp()
             camera_config = self.config.get_camera_by_name(camera_name)
+            if not camera_config:
+                self.logger.error(f"❌ AUTO-RECORDER: No configuration found for camera {camera_name}")
+                return
             video_format = camera_config.video_format if camera_config else "mp4"
             filename = f"{camera_name}_auto_{machine_name}_{timestamp}.{video_format}"
+            self.logger.info(f"📹 AUTO-RECORDER: Starting recording with settings - Exposure: {camera_config.exposure_ms}ms, Gain: {camera_config.gain}, FPS: {camera_config.target_fps}")

             # Use camera manager to start recording with camera's default settings
             success = self.camera_manager.manual_start_recording(
@@ -240,10 +258,10 @@ class StandaloneAutoRecorder:
             )

             if success:
-                self.logger.info(f"Started auto-recording: {camera_name} -> {filename}")
+                self.logger.info(f"AUTO-RECORDER: Successfully started auto-recording: {camera_name} -> {filename}")
                 self.active_recordings[camera_name] = filename
             else:
-                self.logger.error(f"❌ Failed to start auto-recording for camera {camera_name}")
+                self.logger.error(f"❌ AUTO-RECORDER: Failed to start auto-recording for camera {camera_name} (manual_start_recording returned False)")
         else:
             # Standalone mode - use own recorder
             recorder = self._get_camera_recorder(camera_name)
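
Distilled, the mapping logic above reduces to the following sketch (CameraConfig field names taken from this diff; not the actual method):

    def build_machine_camera_map(cameras) -> dict:
        """Map MQTT machine topics to camera names for enabled, auto-recording cameras."""
        mapping = {}
        for cam in cameras:
            if cam.enabled and cam.auto_start_recording_enabled and cam.machine_topic:
                mapping[cam.machine_topic] = cam.name
        return mapping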


@@ -131,10 +131,23 @@ services:
     environment:
       - MEDIA_VIDEOS_DIR=/mnt/nfs_share
       - MEDIA_THUMBS_DIR=/mnt/nfs_share/.thumbnails
+      - MAX_CONCURRENT_TRANSCODING=2  # Limit concurrent transcoding operations
     volumes:
       - /mnt/nfs_share:/mnt/nfs_share
     ports:
       - "8090:8090"
+    deploy:
+      resources:
+        limits:
+          cpus: '4'      # Limit to 4 CPU cores (adjust based on your system)
+          memory: 2G     # Limit to 2GB RAM per container
+        reservations:
+          cpus: '1'      # Reserve at least 1 CPU core
+          memory: 512M   # Reserve at least 512MB RAM
+    # Alternative syntax for older Docker Compose versions:
+    # cpus: '4'
+    # mem_limit: 2g
+    # mem_reservation: 512m

   mediamtx:
     image: bluenviron/mediamtx:latest
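
The two knobs are meant to agree: each FFmpeg job is capped at 2 threads in the media API change below, so with the 4-CPU limit, MAX_CONCURRENT_TRANSCODING=2 saturates the allowance. A hypothetical helper for deriving the value:

    def transcode_limit(cpu_limit: int = 4, threads_per_job: int = 2) -> int:
        # e.g. 4 CPUs / 2 threads per FFmpeg job -> 2 concurrent jobs
        return max(1, cpu_limit // threads_per_job)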


@@ -84,3 +84,4 @@ $$ LANGUAGE plpgsql SECURITY DEFINER;


@@ -116,3 +116,4 @@ CREATE POLICY "User roles are deletable by authenticated users" ON public.user_r


@@ -60,3 +60,4 @@ CREATE POLICY "Machine types are deletable by authenticated users" ON public.mac


@@ -74,3 +74,4 @@ CREATE POLICY "Experiment phases are deletable by authenticated users" ON public


@@ -65,3 +65,4 @@ CREATE POLICY "Experiment repetitions are deletable by authenticated users" ON p


@@ -86,3 +86,4 @@ CREATE POLICY "Meyer Cracker parameters are deletable by authenticated users" ON


@@ -190,3 +190,4 @@ CREATE POLICY "conductor_availability_delete_policy" ON public.conductor_availab


@@ -1,8 +1,10 @@
 import os
 import pathlib
 import subprocess
+import threading
+import time
 import urllib.parse
-from typing import List, Optional
+from typing import List, Optional, Tuple

 from fastapi import FastAPI, HTTPException, Response, Request
 from fastapi.middleware.cors import CORSMiddleware
@@ -11,6 +13,11 @@ from fastapi.responses import FileResponse, StreamingResponse
 MEDIA_DIR = pathlib.Path(os.getenv("MEDIA_VIDEOS_DIR", "/mnt/videos")).resolve()
 THUMBS_DIR = pathlib.Path(os.getenv("MEDIA_THUMBS_DIR", MEDIA_DIR / ".thumbnails")).resolve()

+# Limit concurrent transcoding operations to prevent resource exhaustion
+# Adjust based on your CPU cores and available memory
+MAX_CONCURRENT_TRANSCODING = int(os.getenv("MAX_CONCURRENT_TRANSCODING", "2"))
+transcoding_semaphore = threading.Semaphore(MAX_CONCURRENT_TRANSCODING)
+
 app = FastAPI(title="Media API", version="0.1.0")

 # CORS for dashboard at exp-dash:8080 (and localhost for convenience)
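
The gating pattern this semaphore enables appears in full in the hunks below; in isolation it is simply fail-fast admission control, using the standard threading.Semaphore API:

    import threading

    sem = threading.Semaphore(2)

    def gated_job():
        if not sem.acquire(blocking=False):  # non-blocking: reject, don't queue
            raise RuntimeError("busy")       # the API maps this to HTTP 503
        try:
            ...                              # run one FFmpeg transcode
        finally:
            sem.release()                    # always give the slot back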
@@ -238,10 +245,45 @@ def stream_options():
     )

-def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0):
+def get_video_info(file_path: pathlib.Path) -> Tuple[float, Optional[int]]:
+    """Get video duration and bitrate using ffprobe"""
+    try:
+        # Get duration
+        cmd_duration = [
+            "ffprobe",
+            "-v", "error",
+            "-show_entries", "format=duration",
+            "-of", "default=noprint_wrappers=1:nokey=1",
+            str(file_path)
+        ]
+        result_duration = subprocess.run(cmd_duration, capture_output=True, text=True, check=True)
+        duration = float(result_duration.stdout.strip())
+
+        # Get bitrate
+        cmd_bitrate = [
+            "ffprobe",
+            "-v", "error",
+            "-show_entries", "format=bit_rate",
+            "-of", "default=noprint_wrappers=1:nokey=1",
+            str(file_path)
+        ]
+        result_bitrate = subprocess.run(cmd_bitrate, capture_output=True, text=True, check=True)
+        bitrate_str = result_bitrate.stdout.strip()
+        bitrate = int(bitrate_str) if bitrate_str and bitrate_str.isdigit() else None
+
+        return duration, bitrate
+    except (subprocess.CalledProcessError, ValueError):
+        # Fallback: estimate from file size (very rough estimate)
+        file_size_mb = file_path.stat().st_size / (1024 * 1024)
+        duration = max(10.0, file_size_mb * 20)  # Rough estimate: 20 seconds per MB
+        return duration, None
+
+
+def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0, duration: Optional[float] = None):
     """
     Transcode video to H.264 on-the-fly using FFmpeg.
     Streams H.264/MP4 that browsers can actually play.
+    Uses semaphore to limit concurrent transcoding operations.
     """
     if not file_path.exists():
         raise HTTPException(status_code=404, detail="Video file not found")
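
As an aside, both fields can be read in a single ffprobe call; a sketch under the assumption that ffprobe is on PATH (keeping the key=value pairs makes parsing order-independent):

    import pathlib
    import subprocess

    def probe_duration_bitrate(path: pathlib.Path):
        """Single-call variant of get_video_info (sketch, not the shipped code)."""
        out = subprocess.run(
            ["ffprobe", "-v", "error",
             "-show_entries", "format=duration,bit_rate",
             "-of", "default=noprint_wrappers=1",  # keep keys
             str(path)],
            capture_output=True, text=True, check=True,
        ).stdout
        fields = dict(line.split("=", 1) for line in out.split() if "=" in line)
        duration = float(fields["duration"])
        bitrate = int(fields["bit_rate"]) if fields.get("bit_rate", "").isdigit() else None
        return duration, bitrate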
@@ -249,56 +291,176 @@ def generate_transcoded_stream(file_path: pathlib.Path, start_time: float = 0.0)
     if file_path.stat().st_size == 0:
         raise HTTPException(status_code=500, detail="Video file is empty (0 bytes)")

-    # FFmpeg command to transcode to H.264 with web-optimized settings
-    cmd = [
-        "ffmpeg",
-        "-i", str(file_path),
-        "-c:v", "libx264",  # H.264 codec
-        "-preset", "ultrafast",  # Fast encoding for real-time
-        "-tune", "zerolatency",  # Low latency
-        "-crf", "23",  # Quality (18-28, lower = better)
-        "-c:a", "aac",  # AAC audio if present
-        "-movflags", "+faststart",  # Web-optimized (moov atom at beginning)
-        "-f", "mp4",  # MP4 container
-        "-"  # Output to stdout
-    ]
-
-    # If seeking to specific time
-    if start_time > 0:
-        cmd.insert(-2, "-ss")
-        cmd.insert(-2, str(start_time))
+    # Acquire semaphore to limit concurrent transcoding
+    # This prevents resource exhaustion from too many simultaneous FFmpeg processes
+    semaphore_acquired = False
     try:
-        process = subprocess.Popen(
-            cmd,
-            stdout=subprocess.PIPE,
-            stderr=subprocess.PIPE,
-            bufsize=8192
-        )
+        if not transcoding_semaphore.acquire(blocking=False):
+            raise HTTPException(
+                status_code=503,
+                detail=f"Server busy: Maximum concurrent transcoding operations ({MAX_CONCURRENT_TRANSCODING}) reached. Please try again in a moment."
+            )
+        semaphore_acquired = True

-        # Stream chunks
-        chunk_size = 8192
-        bytes_yielded = 0
-        while True:
-            chunk = process.stdout.read(chunk_size)
-            if not chunk:
-                break
-            bytes_yielded += len(chunk)
-            yield chunk
+        # FFmpeg command to transcode to H.264 with web-optimized settings
+        # Use fragmented MP4 for HTTP streaming (doesn't require seekable output)
+        # frag_keyframe: fragment at keyframes
+        # dash: use DASH-compatible fragmentation
+        # omit_tfhd_offset: avoid seeking by omitting track fragment header offset
+        # Optimized for resource usage: ultrafast preset, limited threads
+        # Build command with proper order: input seeking first, then input, then filters/codecs
+        cmd = ["ffmpeg"]

-        # Check for errors
-        process.wait()
-        if process.returncode != 0:
-            stderr = process.stderr.read().decode('utf-8', errors='ignore')
-            print(f"FFmpeg error (code {process.returncode}): {stderr}")
-            if bytes_yielded == 0:
-                raise HTTPException(status_code=500, detail=f"FFmpeg transcoding failed: {stderr[:200]}")
+        # If seeking to specific time, use input seeking (before -i, more accurate)
+        if start_time > 0:
+            cmd.extend(["-ss", str(start_time)])

-    except HTTPException:
-        raise
-    except Exception as e:
-        print(f"FFmpeg transcoding error: {e}")
-        raise HTTPException(status_code=500, detail=f"Transcoding error: {str(e)}")
+        # Input file
+        cmd.extend(["-i", str(file_path)])
+
+        # Video codec settings
+        cmd.extend([
+            "-c:v", "libx264",  # H.264 codec
+            "-preset", "ultrafast",  # Fast encoding for real-time (lowest CPU usage)
+            "-tune", "zerolatency",  # Low latency
+            "-crf", "23",  # Quality (18-28, lower = better)
+            "-threads", "2",  # Limit threads to reduce CPU usage (adjust based on CPU cores)
+            "-max_muxing_queue_size", "1024",  # Prevent buffer overflow
+        ])
+
+        # If duration is specified (for range requests), limit output duration
+        if duration is not None:
+            cmd.extend(["-t", str(duration)])
+
+        # Audio codec settings
+        cmd.extend([
+            "-c:a", "aac",  # AAC audio if present
+            "-b:a", "128k",  # Limit audio bitrate to save resources
+        ])
+
+        # Output format settings
+        cmd.extend([
+            "-movflags", "frag_keyframe+dash+omit_tfhd_offset",  # Fragmented MP4 optimized for HTTP streaming
+            "-f", "mp4",  # MP4 container
+            "-"  # Output to stdout
+        ])
+
+        process = None
+        stderr_thread = None
+        try:
+            process = subprocess.Popen(
+                cmd,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                bufsize=0  # Unbuffered for better streaming
+            )
+
+            # Stream chunks
+            chunk_size = 8192
+            bytes_yielded = 0
+            stderr_data = []
+
+            # Read stderr in background (to avoid blocking)
+            def read_stderr():
+                while True:
+                    chunk = process.stderr.read(1024)
+                    if not chunk:
+                        break
+                    stderr_data.append(chunk)
+
+            stderr_thread = threading.Thread(target=read_stderr, daemon=True)
+            stderr_thread.start()
+
+            try:
+                while True:
+                    chunk = process.stdout.read(chunk_size)
+                    if not chunk:
+                        break
+                    bytes_yielded += len(chunk)
+                    yield chunk
+            except GeneratorExit:
+                # Generator was closed/stopped - cleanup process
+                if process and process.poll() is None:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                raise
+            except Exception as e:
+                # Error during streaming - cleanup and re-raise
+                if process and process.poll() is None:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                raise
+
+            # Wait for process to finish and stderr thread to complete
+            process.wait()
+            if stderr_thread:
+                stderr_thread.join(timeout=1)
+
+            # Check for errors
+            if process.returncode != 0:
+                stderr = b''.join(stderr_data).decode('utf-8', errors='ignore')
+                # Extract actual error message (skip version banner)
+                error_lines = stderr.split('\n')
+                # Skip version/configuration lines and get actual error
+                error_msg = '\n'.join([line for line in error_lines if line and
+                                       not line.startswith('ffmpeg version') and
+                                       not line.startswith('built with') and
+                                       not line.startswith('configuration:') and
+                                       not line.startswith('libav') and
+                                       'Copyright' not in line])
+                # If no meaningful error found, use last few lines
+                if not error_msg.strip():
+                    error_msg = '\n'.join(error_lines[-10:])
+                print(f"FFmpeg error (code {process.returncode}): Full stderr:\n{stderr}")
+                print(f"FFmpeg command was: {' '.join(cmd)}")
+                if bytes_yielded == 0:
+                    # Show first 500 chars of actual error (not just version info)
+                    error_detail = error_msg[:500] if error_msg else stderr[:500]
+                    raise HTTPException(status_code=500, detail=f"FFmpeg transcoding failed: {error_detail}")
+        except HTTPException:
+            raise
+        except Exception as e:
+            # Ensure process is cleaned up on any error
+            if process and process.poll() is None:
+                try:
+                    process.terminate()
+                    # Wait briefly, then force kill if needed
+                    time.sleep(0.5)
+                    if process.poll() is None:
+                        try:
+                            process.kill()
+                        except Exception:
+                            pass
+                except Exception:
+                    try:
+                        process.kill()
+                    except Exception:
+                        pass
+            print(f"FFmpeg transcoding error: {e}")
+            raise HTTPException(status_code=500, detail=f"Transcoding error: {str(e)}")
+    finally:
+        # Always release semaphore when done (success or error)
+        # Only release if we actually acquired it
+        if semaphore_acquired:
+            try:
+                transcoding_semaphore.release()
+            except Exception as e:
+                print(f"Error releasing semaphore: {e}")

 @app.head("/videos/{file_id:path}/stream-transcoded")
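
One subtlety worth noting: the background stderr reader above is not just for logging. If FFmpeg fills its stderr pipe while nothing drains it, the process stalls and the stream hangs. The distilled pattern, with placeholder FFmpeg arguments:

    import subprocess
    import threading

    proc = subprocess.Popen(
        ["ffmpeg", "-i", "input.mp4", "-movflags", "frag_keyframe", "-f", "mp4", "-"],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
    )
    err_chunks = []
    drain = threading.Thread(target=lambda: err_chunks.append(proc.stderr.read()), daemon=True)
    drain.start()  # keep stderr flowing while stdout is streamed
    for chunk in iter(lambda: proc.stdout.read(8192), b""):
        ...        # forward each chunk to the HTTP response
    proc.wait()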
@@ -309,10 +471,14 @@ def stream_transcoded(request: Request, file_id: str, start_time: float = 0.0):
     """
     Stream video transcoded to H.264 on-the-fly.
     This endpoint converts MPEG-4 Part 2 videos to H.264 for browser compatibility.
+    Supports seeking via HTTP Range requests or start_time parameter.
     """
     p = path_from_file_id(file_id)
     content_type = "video/mp4"

+    # Get video duration and bitrate for range request handling
+    video_duration, original_bitrate = get_video_info(p)
+
     # Base headers
     headers = {
         "Content-Type": content_type,
@@ -321,14 +487,69 @@ def stream_transcoded(request: Request, file_id: str, start_time: float = 0.0):
         "Access-Control-Allow-Methods": "GET, HEAD, OPTIONS",
         "Access-Control-Allow-Headers": "Range, Content-Type",
         "Access-Control-Expose-Headers": "Content-Range, Accept-Ranges, Content-Length",
+        "Accept-Ranges": "bytes",
     }

-    # For HEAD requests, just return headers (we can't know size without transcoding)
+    # For HEAD requests, return headers (estimate size)
     if request.method == "HEAD":
+        # Rough estimate: ~2-3 MB per minute of video
+        estimated_size = int(video_duration * 50000)  # ~50KB per second estimate
+        headers["Content-Length"] = str(estimated_size)
         return Response(status_code=200, headers=headers)

-    # Note: Range requests are complex with transcoding, so we'll transcode from start
-    # For better performance with range requests, we'd need to cache transcoded segments
+    # Handle Range requests for seeking
+    range_header = request.headers.get("range")
+    if range_header:
+        # Parse range request: bytes=START-END
+        range_value = range_header.strip().lower().replace("bytes=", "")
+        start_str, _, end_str = range_value.partition("-")
+        try:
+            byte_start = int(start_str) if start_str else 0
+            byte_end = int(end_str) if end_str else None
+        except ValueError:
+            # Invalid range, ignore and stream from start
+            range_header = None
+
+    if range_header:
+        # For seeking, convert byte range to time-based seeking
+        # Estimate transcoded bitrate (H.264 is typically more efficient than original)
+        # Use original bitrate if available, otherwise estimate
+        if original_bitrate:
+            # H.264 transcoding typically uses 70-80% of original bitrate at same quality
+            transcoded_bitrate = int(original_bitrate * 0.75)
+        else:
+            # Default estimate: 2 Mbps
+            transcoded_bitrate = 2000000
+
+        estimated_total_bytes = int(video_duration * transcoded_bitrate / 8)
+
+        if estimated_total_bytes > 0 and byte_start < estimated_total_bytes:
+            # Calculate time position from byte offset
+            time_start_sec = (byte_start / estimated_total_bytes) * video_duration
+            time_start_sec = max(0.0, min(time_start_sec, video_duration - 0.5))
+
+            # For seeking, don't limit duration - stream to end
+            # The browser will handle buffering
+            duration_sec = None  # None means stream to end
+
+            # Update headers for range response
+            # For seeking, we typically don't know the exact end, so estimate
+            actual_byte_end = min(byte_end or estimated_total_bytes - 1, estimated_total_bytes - 1)
+            headers["Content-Range"] = f"bytes {byte_start}-{actual_byte_end}/{estimated_total_bytes}"
+            headers["Content-Length"] = str(actual_byte_end - byte_start + 1)
+
+            # Stream from the calculated time position using FFmpeg's -ss flag
+            # Duration is None, so it will stream to the end
+            return StreamingResponse(
+                generate_transcoded_stream(p, time_start_sec, duration_sec),
+                media_type=content_type,
+                headers=headers,
+                status_code=206  # Partial Content
+            )
+
+    # No range request or invalid range - stream from start_time
     return StreamingResponse(
         generate_transcoded_stream(p, start_time),
         media_type=content_type,
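
To make the byte-to-time mapping concrete, a worked example using the formulas above (all numbers illustrative):

    # A 60-second video probed at 4 Mb/s:
    video_duration = 60.0
    original_bitrate = 4_000_000
    transcoded_bitrate = int(original_bitrate * 0.75)                      # 3_000_000 (75% heuristic)
    estimated_total_bytes = int(video_duration * transcoded_bitrate / 8)   # 22_500_000

    # "Range: bytes=11250000-" is halfway through, so the stream seeks to:
    time_start_sec = (11_250_000 / estimated_total_bytes) * video_duration  # 30.0 seconds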


@@ -23,8 +23,8 @@ export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
       controls: true,
       autoplay: true,
       preload: 'auto',
-      fluid: true,
-      responsive: true,
+      fluid: false,       // Disable fluid mode to respect container boundaries
+      responsive: false,  // Disable responsive mode to prevent overflow
       playbackRates: [0.5, 1, 1.25, 1.5, 2],
       sources: [
         {
@@ -65,53 +65,75 @@ export const VideoModal: React.FC<Props> = ({ fileId, onClose }) => {
   if (!fileId || !src) return null

   return (
-    <div
-      className="fixed inset-0 z-[1000] bg-black/60 dark:bg-black/80 backdrop-blur-sm flex items-center justify-center p-4"
-      onClick={onClose}
-    >
-      <div
-        className="bg-white dark:bg-gray-800 rounded-lg shadow-2xl w-full max-w-4xl overflow-hidden transform transition-all relative"
-        onClick={(e: React.MouseEvent) => e.stopPropagation()}
-      >
-        {/* Close button - positioned absolutely in top right corner */}
-        <button
-          onClick={onClose}
-          className="absolute top-3 right-3 z-10 inline-flex items-center justify-center w-10 h-10 rounded-lg bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-300 border border-gray-300 dark:border-gray-600 shadow-md hover:bg-red-50 dark:hover:bg-red-900/20 hover:text-red-600 dark:hover:text-red-400 hover:border-red-300 dark:hover:border-red-700 transition-all duration-200"
-          aria-label="Close modal"
-        >
-          <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" strokeWidth={2.5}>
-            <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
-          </svg>
-        </button>
-        <div className="p-4 bg-gray-50 dark:bg-gray-900 border-b border-gray-200 dark:border-gray-700">
-          <div className="flex items-center justify-between">
-            <div>
-              <h3 className="text-lg font-semibold text-gray-900 dark:text-white">Video Player</h3>
-              <p className="text-xs text-gray-500 dark:text-gray-400 mt-0.5">Watch your recording</p>
-            </div>
-            <a
-              href={`${BASE}/videos/stream?file_id=${encodeURIComponent(fileId)}`}
-              download={fileId.split('/').pop() || 'video.mp4'}
-              className="px-3 py-1.5 text-xs bg-blue-600 hover:bg-blue-700 text-white rounded-md transition-colors"
-              onClick={(e) => e.stopPropagation()}
-            >
-              Download Video
-            </a>
-          </div>
-        </div>
-        <div className="p-4 bg-black">
-          <div className="relative w-full" style={{ aspectRatio: '16/9', maxHeight: '70vh' }}>
-            <video
-              ref={videoRef}
-              className="video-js vjs-default-skin w-full h-full"
-              playsInline
-              key={fileId}
-            />
-          </div>
-        </div>
-      </div>
-    </div>
+    <>
+      <style>{`
+        .video-modal-container .video-js {
+          width: 100% !important;
+          height: 100% !important;
+          max-width: 100% !important;
+          max-height: 100% !important;
+        }
+        .video-modal-container .video-js .vjs-tech {
+          width: 100% !important;
+          height: 100% !important;
+          object-fit: contain;
+        }
+        .video-modal-container .video-js .vjs-control-bar {
+          position: absolute !important;
+          bottom: 0 !important;
+          left: 0 !important;
+          right: 0 !important;
+          width: 100% !important;
+        }
+      `}</style>
+      <div
+        className="fixed inset-0 z-[1000] bg-black/60 dark:bg-black/80 backdrop-blur-sm flex items-center justify-center p-4"
+        onClick={onClose}
+      >
+        <div
+          className="video-modal-container bg-white dark:bg-gray-800 rounded-lg shadow-2xl w-full max-w-4xl max-h-[90vh] overflow-hidden transform transition-all relative flex flex-col"
+          onClick={(e: React.MouseEvent) => e.stopPropagation()}
+        >
+          {/* Close button - positioned absolutely in top right corner */}
+          <button
+            onClick={onClose}
+            className="absolute top-3 right-3 z-10 inline-flex items-center justify-center w-10 h-10 rounded-lg bg-white dark:bg-gray-800 text-gray-700 dark:text-gray-300 border border-gray-300 dark:border-gray-600 shadow-md hover:bg-red-50 dark:hover:bg-red-900/20 hover:text-red-600 dark:hover:text-red-400 hover:border-red-300 dark:hover:border-red-700 transition-all duration-200"
+            aria-label="Close modal"
+          >
+            <svg className="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24" strokeWidth={2.5}>
+              <path strokeLinecap="round" strokeLinejoin="round" d="M6 18L18 6M6 6l12 12" />
+            </svg>
+          </button>
+          <div className="p-4 bg-gray-50 dark:bg-gray-900 border-b border-gray-200 dark:border-gray-700">
+            <div className="flex items-center justify-between">
+              <div>
+                <h3 className="text-lg font-semibold text-gray-900 dark:text-white">Video Player</h3>
+                <p className="text-xs text-gray-500 dark:text-gray-400 mt-0.5">Watch your recording</p>
+              </div>
+              <a
+                href={`${BASE}/videos/stream?file_id=${encodeURIComponent(fileId)}`}
+                download={fileId.split('/').pop() || 'video.mp4'}
+                className="px-3 py-1.5 text-xs bg-blue-600 hover:bg-blue-700 text-white rounded-md transition-colors"
+                onClick={(e) => e.stopPropagation()}
+              >
+                Download Video
+              </a>
+            </div>
+          </div>
+          <div className="flex-1 min-h-0 bg-black p-4 flex items-center justify-center">
+            <div className="relative w-full max-w-full max-h-full" style={{ aspectRatio: '16/9', maxHeight: 'calc(90vh - 200px)' }}>
+              <video
+                ref={videoRef}
+                className="video-js vjs-default-skin w-full h-full"
+                playsInline
+                key={fileId}
+              />
+            </div>
+          </div>
+        </div>
+      </div>
+    </>
   )
 }