Chore: rename api->camera-management-api and web->management-dashboard-web-app; update compose, ignore, README references

This commit is contained in:
Alireza Vaezi
2025-08-07 22:07:25 -04:00
parent 28dab3a366
commit fc2da16728
281 changed files with 19 additions and 19 deletions

View File

@@ -0,0 +1,18 @@
"""
Video Infrastructure Layer.
Contains implementations of domain interfaces using external dependencies
like file systems, FFmpeg, OpenCV, etc.
"""
from .repositories import FileSystemVideoRepository
from .converters import FFmpegVideoConverter
from .metadata_extractors import OpenCVMetadataExtractor
from .caching import InMemoryStreamingCache
# Public re-exports that define this package's API surface.
__all__ = [
    "FileSystemVideoRepository",
    "FFmpegVideoConverter",
    "OpenCVMetadataExtractor",
    "InMemoryStreamingCache",
]

View File

@@ -0,0 +1,176 @@
"""
Streaming Cache Implementations.
In-memory and file-based caching for video streaming optimization.
"""
import asyncio
import logging
from typing import Optional, Dict, Tuple
from datetime import datetime, timedelta
import hashlib
from ..domain.interfaces import StreamingCache
from ..domain.models import StreamRange
class InMemoryStreamingCache(StreamingCache):
    """In-memory cache of video byte ranges, bounded by size and age.

    Entries are stored as ``{cache_key: (data, timestamp, size)}`` where the
    key is ``"{file_id}:{start}-{end}"``.  When the size budget is exceeded,
    the oldest entries (by insertion time) are evicted first.  All public
    methods are coroutine-safe via a single ``asyncio.Lock``.
    """

    def __init__(self, max_size_mb: int = 100, max_age_minutes: int = 30):
        # Size budget and time-to-live for cached ranges.
        self.max_size_bytes = max_size_mb * 1024 * 1024
        self.max_age = timedelta(minutes=max_age_minutes)
        self.logger = logging.getLogger(__name__)
        # Cache storage: {cache_key: (data, timestamp, size)}
        self._cache: Dict[str, Tuple[bytes, datetime, int]] = {}
        self._current_size = 0
        self._lock = asyncio.Lock()

    async def get_cached_range(
        self,
        file_id: str,
        range_request: StreamRange
    ) -> Optional[bytes]:
        """Return the cached bytes for this range, or None on miss/expiry."""
        cache_key = self._generate_cache_key(file_id, range_request)
        async with self._lock:
            if cache_key in self._cache:
                data, timestamp, size = self._cache[cache_key]
                # Entries older than max_age count as misses and are purged.
                if datetime.now() - timestamp <= self.max_age:
                    self.logger.debug(f"Cache hit for {file_id} range {range_request.start}-{range_request.end}")
                    return data
                else:
                    del self._cache[cache_key]
                    self._current_size -= size
                    self.logger.debug(f"Cache entry expired for {file_id}")
            return None

    async def cache_range(
        self,
        file_id: str,
        range_request: StreamRange,
        data: bytes
    ) -> None:
        """Cache byte range data, evicting oldest entries to make room.

        Payloads larger than the whole cache budget are skipped: caching
        them would wipe the cache and still exceed the limit.
        """
        cache_key = self._generate_cache_key(file_id, range_request)
        data_size = len(data)
        if data_size > self.max_size_bytes:
            self.logger.debug(f"Skipping cache for {file_id}: {data_size} bytes exceeds cache capacity")
            return
        async with self._lock:
            # If this key is being overwritten, release the old entry's
            # accounting first so _current_size stays accurate (the original
            # code double-counted on overwrite).
            if cache_key in self._cache:
                _, _, old_size = self._cache[cache_key]
                del self._cache[cache_key]
                self._current_size -= old_size
            # Evict oldest entries until the new payload fits.
            while self._current_size + data_size > self.max_size_bytes and self._cache:
                await self._evict_oldest()
            self._cache[cache_key] = (data, datetime.now(), data_size)
            self._current_size += data_size
            self.logger.debug(f"Cached {data_size} bytes for {file_id} range {range_request.start}-{range_request.end}")

    async def invalidate_file(self, file_id: str) -> None:
        """Remove every cached range belonging to ``file_id``."""
        async with self._lock:
            # Keys are "{file_id}:{start}-{end}", so a prefix match finds
            # every range cached for this file.
            keys_to_remove = [key for key in self._cache.keys() if key.startswith(f"{file_id}:")]
            for key in keys_to_remove:
                _, _, size = self._cache[key]
                del self._cache[key]
                self._current_size -= size
            if keys_to_remove:
                self.logger.info(f"Invalidated {len(keys_to_remove)} cache entries for {file_id}")

    async def cleanup_cache(self, max_size_mb: int = 100) -> int:
        """Drop expired entries, then evict oldest until under ``max_size_mb``.

        Returns the number of entries removed.
        """
        target_size = max_size_mb * 1024 * 1024
        entries_removed = 0
        async with self._lock:
            # Pass 1: remove entries past their time-to-live.
            current_time = datetime.now()
            expired_keys = [
                key for key, (_, timestamp, _) in self._cache.items()
                if current_time - timestamp > self.max_age
            ]
            for key in expired_keys:
                _, _, size = self._cache[key]
                del self._cache[key]
                self._current_size -= size
                entries_removed += 1
            # Pass 2: evict oldest entries while still over the target.
            while self._current_size > target_size and self._cache:
                await self._evict_oldest()
                entries_removed += 1
            if entries_removed > 0:
                self.logger.info(f"Cache cleanup removed {entries_removed} entries")
        return entries_removed

    async def _evict_oldest(self) -> None:
        """Evict the single oldest entry.  Caller must hold ``self._lock``."""
        if not self._cache:
            return
        # Oldest by stored timestamp (insertion time, not last access).
        oldest_key = min(self._cache.keys(), key=lambda k: self._cache[k][1])
        _, _, size = self._cache[oldest_key]
        del self._cache[oldest_key]
        self._current_size -= size
        self.logger.debug(f"Evicted cache entry: {oldest_key}")

    def _generate_cache_key(self, file_id: str, range_request: StreamRange) -> str:
        """Build the cache key ``"{file_id}:{start}-{end}"`` for a range."""
        range_str = f"{range_request.start}-{range_request.end}"
        return f"{file_id}:{range_str}"

    async def get_cache_stats(self) -> dict:
        """Return a snapshot of entry count, byte usage, and utilization."""
        async with self._lock:
            return {
                "entries": len(self._cache),
                "size_bytes": self._current_size,
                "size_mb": self._current_size / (1024 * 1024),
                "max_size_mb": self.max_size_bytes / (1024 * 1024),
                # Guard against a zero-size budget (max_size_mb=0).
                "utilization_percent": (self._current_size / self.max_size_bytes) * 100
                if self.max_size_bytes else 0.0,
            }
class NoOpStreamingCache(StreamingCache):
    """Disabled cache: every lookup misses and every write is discarded."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    async def get_cached_range(
        self,
        file_id: str,
        range_request: StreamRange
    ) -> Optional[bytes]:
        """Report a cache miss for every request."""
        return None

    async def cache_range(
        self,
        file_id: str,
        range_request: StreamRange,
        data: bytes
    ) -> None:
        """Drop the data without caching it."""
        return None

    async def invalidate_file(self, file_id: str) -> None:
        """Nothing is ever cached, so there is nothing to invalidate."""
        return None

    async def cleanup_cache(self, max_size_mb: int = 100) -> int:
        """Report that zero entries were removed."""
        return 0

View File

@@ -0,0 +1,220 @@
"""
Video Format Converters.
Implementations for converting video formats using FFmpeg.
"""
import asyncio
import logging
import shutil
from typing import Optional
from pathlib import Path
from datetime import datetime, timedelta
from ..domain.interfaces import VideoConverter
from ..domain.models import VideoFormat
class FFmpegVideoConverter(VideoConverter):
    """Video converter backed by the FFmpeg command-line tool.

    Converted files are written beneath ``temp_dir`` and can be purged with
    :meth:`cleanup_converted_files`.  FFmpeg availability is detected once
    at construction time; conversion is disabled when it is missing.
    """

    def __init__(self, temp_dir: Optional[Path] = None):
        self.logger = logging.getLogger(__name__)
        self.temp_dir = temp_dir or Path("/tmp/video_conversions")
        self.temp_dir.mkdir(parents=True, exist_ok=True)
        # Probe PATH once; all conversions are refused if FFmpeg is absent.
        self._ffmpeg_available = shutil.which("ffmpeg") is not None
        if not self._ffmpeg_available:
            self.logger.warning("FFmpeg not found - video conversion will be disabled")

    async def convert(
        self,
        source_path: Path,
        target_path: Path,
        target_format: VideoFormat,
        quality: Optional[str] = None
    ) -> bool:
        """Convert ``source_path`` to ``target_format`` at ``target_path``.

        Returns True on success; False when FFmpeg is missing, exits with a
        non-zero status, or an unexpected error occurs.
        """
        if not self._ffmpeg_available:
            self.logger.error("FFmpeg not available for conversion")
            return False
        try:
            target_path.parent.mkdir(parents=True, exist_ok=True)
            command = self._build_ffmpeg_command(source_path, target_path, target_format, quality)
            self.logger.info(f"Converting {source_path} to {target_path} using FFmpeg")
            process = await asyncio.create_subprocess_exec(
                *command,
                stdout=asyncio.subprocess.PIPE,
                stderr=asyncio.subprocess.PIPE,
            )
            _stdout, stderr = await process.communicate()
            if process.returncode != 0:
                error_msg = stderr.decode() if stderr else "Unknown FFmpeg error"
                self.logger.error(f"FFmpeg conversion failed: {error_msg}")
                return False
            self.logger.info(f"Successfully converted {source_path} to {target_path}")
            return True
        except Exception as e:
            self.logger.error(f"Error during video conversion: {e}")
            return False

    async def is_conversion_needed(
        self,
        source_format: VideoFormat,
        target_format: VideoFormat
    ) -> bool:
        """A conversion is needed whenever the two formats differ."""
        return source_format != target_format

    async def get_converted_path(
        self,
        original_path: Path,
        target_format: VideoFormat
    ) -> Path:
        """Return a timestamped output path in ``temp_dir`` for the conversion."""
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        converted_filename = f"{original_path.stem}_{timestamp}.{target_format.value}"
        return self.temp_dir / converted_filename

    async def cleanup_converted_files(self, max_age_hours: int = 24) -> int:
        """Delete converted files older than ``max_age_hours``.

        Returns the number of files removed; failures to delete individual
        files are logged and skipped.
        """
        try:
            if not self.temp_dir.exists():
                return 0
            cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
            files_removed = 0
            for file_path in self.temp_dir.iterdir():
                if not file_path.is_file():
                    continue
                if datetime.fromtimestamp(file_path.stat().st_mtime) >= cutoff_time:
                    continue
                try:
                    file_path.unlink()
                except Exception as e:
                    self.logger.warning(f"Could not remove {file_path}: {e}")
                else:
                    files_removed += 1
                    self.logger.debug(f"Removed old converted file: {file_path}")
            self.logger.info(f"Cleaned up {files_removed} old converted files")
            return files_removed
        except Exception as e:
            self.logger.error(f"Error during converted files cleanup: {e}")
            return 0

    def _build_ffmpeg_command(
        self,
        source_path: Path,
        target_path: Path,
        target_format: VideoFormat,
        quality: Optional[str] = None
    ) -> list:
        """Assemble the FFmpeg argv for the requested conversion."""
        command = ["ffmpeg", "-i", str(source_path)]
        if target_format == VideoFormat.MP4:
            command += [
                "-c:v", "libx264",          # H.264 video codec
                "-c:a", "aac",              # AAC audio codec
                "-movflags", "+faststart",  # Enable progressive download
            ]
            # CRF per requested quality; unknown/None falls back to medium.
            crf_by_quality = {"high": "18", "medium": "23", "low": "28"}
            command += ["-crf", crf_by_quality.get(quality, "23")]
        elif target_format == VideoFormat.WEBM:
            command += [
                "-c:v", "libvpx-vp9",  # VP9 video codec
                "-c:a", "libopus",     # Opus audio codec
            ]
            # Constant-quality mode: CRF with "-b:v 0"; default is medium.
            crf_by_quality = {"high": "15", "medium": "20", "low": "25"}
            command += ["-crf", crf_by_quality.get(quality, "20"), "-b:v", "0"]
        command += [
            "-preset", "fast",  # encoding speed vs compression trade-off
            "-y",               # overwrite output file
            str(target_path),
        ]
        return command
class NoOpVideoConverter(VideoConverter):
    """Fallback converter used when FFmpeg is unavailable.

    It can only satisfy requests where the source already has the target
    format (plain file copy); any real conversion request fails.
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    async def convert(
        self,
        source_path: Path,
        target_path: Path,
        target_format: VideoFormat,
        quality: Optional[str] = None
    ) -> bool:
        """Copy the file when formats already match; otherwise fail."""
        try:
            source_extension = source_path.suffix.lower().lstrip('.')
            if source_extension != target_format.value:
                # Real transcoding is impossible without FFmpeg.
                self.logger.warning(f"Cannot convert {source_path} to {target_format} - no converter available")
                return False
            shutil.copy2(source_path, target_path)
            return True
        except Exception as e:
            self.logger.error(f"Error in no-op conversion: {e}")
            return False

    async def is_conversion_needed(
        self,
        source_format: VideoFormat,
        target_format: VideoFormat
    ) -> bool:
        """A conversion is needed whenever the two formats differ."""
        return source_format != target_format

    async def get_converted_path(
        self,
        original_path: Path,
        target_format: VideoFormat
    ) -> Path:
        """Derive the converted filename by swapping the extension."""
        return original_path.with_suffix(f".{target_format.value}")

    async def cleanup_converted_files(self, max_age_hours: int = 24) -> int:
        """Nothing is ever produced, so there is nothing to clean up."""
        return 0

View File

@@ -0,0 +1,201 @@
"""
Video Metadata Extractors.
Implementations for extracting video metadata using OpenCV and other tools.
"""
import asyncio
import logging
from typing import Optional
from pathlib import Path
import cv2
import numpy as np
from ..domain.interfaces import MetadataExtractor
from ..domain.models import VideoMetadata
class OpenCVMetadataExtractor(MetadataExtractor):
    """OpenCV-based metadata extractor.

    All OpenCV calls are synchronous and potentially slow, so the public
    coroutines offload them to the default thread-pool executor via
    ``asyncio.get_running_loop().run_in_executor`` (``get_event_loop`` from
    inside a coroutine is deprecated since Python 3.10).
    """

    def __init__(self):
        self.logger = logging.getLogger(__name__)

    async def extract(self, file_path: Path) -> Optional[VideoMetadata]:
        """Extract metadata from a video file; return None on any failure."""
        try:
            # Run OpenCV operations in a thread pool to avoid blocking the loop.
            return await asyncio.get_running_loop().run_in_executor(
                None, self._extract_sync, file_path
            )
        except Exception as e:
            self.logger.error(f"Error extracting metadata from {file_path}: {e}")
            return None

    def _extract_sync(self, file_path: Path) -> Optional[VideoMetadata]:
        """Synchronous metadata extraction (runs in a worker thread)."""
        cap = None
        try:
            cap = cv2.VideoCapture(str(file_path))
            if not cap.isOpened():
                self.logger.warning(f"Could not open video file: {file_path}")
                return None
            # Basic stream properties.
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            # Duration is derived from frame count; guard against fps == 0.
            duration_seconds = frame_count / fps if fps > 0 else 0.0
            # Codec as a 4-character fourcc string.
            fourcc = int(cap.get(cv2.CAP_PROP_FOURCC))
            codec = self._fourcc_to_string(fourcc)
            # Bitrate is not reported by every backend/container.
            bitrate = cap.get(cv2.CAP_PROP_BITRATE)
            bitrate = int(bitrate) if bitrate > 0 else None
            return VideoMetadata(
                duration_seconds=duration_seconds,
                width=width,
                height=height,
                fps=fps,
                codec=codec,
                bitrate=bitrate
            )
        except Exception as e:
            self.logger.error(f"Error in sync metadata extraction: {e}")
            return None
        finally:
            # Always release the capture handle, even on failure.
            if cap is not None:
                cap.release()

    async def extract_thumbnail(
        self,
        file_path: Path,
        timestamp_seconds: float = 1.0,
        size: tuple = (320, 240)
    ) -> Optional[bytes]:
        """Extract a JPEG thumbnail at ``timestamp_seconds``; None on failure."""
        try:
            return await asyncio.get_running_loop().run_in_executor(
                None, self._extract_thumbnail_sync, file_path, timestamp_seconds, size
            )
        except Exception as e:
            self.logger.error(f"Error extracting thumbnail from {file_path}: {e}")
            return None

    def _extract_thumbnail_sync(
        self,
        file_path: Path,
        timestamp_seconds: float,
        size: tuple
    ) -> Optional[bytes]:
        """Synchronous thumbnail extraction (runs in a worker thread).

        Falls back to the first frame when the requested timestamp cannot
        be decoded.  Returns JPEG-encoded bytes or None.
        """
        cap = None
        try:
            cap = cv2.VideoCapture(str(file_path))
            if not cap.isOpened():
                return None
            # Translate the timestamp into a frame index via the stream FPS.
            fps = cap.get(cv2.CAP_PROP_FPS)
            if fps <= 0:
                fps = 30  # Default fallback
            target_frame = int(timestamp_seconds * fps)
            cap.set(cv2.CAP_PROP_POS_FRAMES, target_frame)
            ret, frame = cap.read()
            if not ret or frame is None:
                # Fallback to first frame
                cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
                ret, frame = cap.read()
                if not ret or frame is None:
                    return None
            # Resize to the requested thumbnail size and encode as JPEG.
            thumbnail = cv2.resize(frame, size)
            success, buffer = cv2.imencode('.jpg', thumbnail, [cv2.IMWRITE_JPEG_QUALITY, 85])
            if success:
                return buffer.tobytes()
            return None
        except Exception as e:
            self.logger.error(f"Error in sync thumbnail extraction: {e}")
            return None
        finally:
            if cap is not None:
                cap.release()

    async def is_valid_video(self, file_path: Path) -> bool:
        """Return True when the file exists and a first frame can be decoded."""
        try:
            return await asyncio.get_running_loop().run_in_executor(
                None, self._is_valid_video_sync, file_path
            )
        except Exception as e:
            self.logger.error(f"Error validating video {file_path}: {e}")
            return False

    def _is_valid_video_sync(self, file_path: Path) -> bool:
        """Synchronous video validation (runs in a worker thread)."""
        cap = None
        try:
            if not file_path.exists():
                return False
            cap = cv2.VideoCapture(str(file_path))
            if not cap.isOpened():
                return False
            # Decoding one frame is the cheapest meaningful validity check.
            ret, frame = cap.read()
            return ret and frame is not None
        except Exception:
            return False
        finally:
            if cap is not None:
                cap.release()

    def _fourcc_to_string(self, fourcc: int) -> str:
        """Convert an OpenCV fourcc integer into its 4-character code.

        Non-printable bytes become '?'; any failure yields "UNKNOWN".
        """
        try:
            # The fourcc packs four ASCII bytes, least-significant first.
            fourcc_bytes = [
                (fourcc & 0xFF),
                ((fourcc >> 8) & 0xFF),
                ((fourcc >> 16) & 0xFF),
                ((fourcc >> 24) & 0xFF)
            ]
            codec_chars = []
            for byte_val in fourcc_bytes:
                if 32 <= byte_val <= 126:  # Printable ASCII
                    codec_chars.append(chr(byte_val))
                else:
                    codec_chars.append('?')
            return ''.join(codec_chars).strip()
        except Exception:
            return "UNKNOWN"

View File

@@ -0,0 +1,183 @@
"""
Video Repository Implementations.
File system-based implementation of video repository interface.
"""
import asyncio
import logging
from typing import List, Optional, BinaryIO
from datetime import datetime
from pathlib import Path
import aiofiles
from ..domain.interfaces import VideoRepository
from ..domain.models import VideoFile, VideoFormat, VideoStatus, StreamRange
from ...core.config import Config
from ...storage.manager import StorageManager
class FileSystemVideoRepository(VideoRepository):
    """File-system implementation of the video repository interface.

    File discovery and bookkeeping are delegated to ``StorageManager``;
    this class translates its dict-shaped file info into ``VideoFile``
    domain models and performs async reads with ``aiofiles``.
    """

    def __init__(self, config: Config, storage_manager: StorageManager):
        self.config = config
        self.storage_manager = storage_manager
        self.logger = logging.getLogger(__name__)

    async def get_by_id(self, file_id: str) -> Optional[VideoFile]:
        """Return the video with ``file_id``, or None if unknown/on error."""
        try:
            file_info = self.storage_manager.get_file_info(file_id)
            if not file_info:
                return None
            return self._convert_to_video_file(file_info)
        except Exception as e:
            self.logger.error(f"Error getting video by ID {file_id}: {e}")
            return None

    async def get_by_camera(
        self,
        camera_name: str,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        limit: Optional[int] = None
    ) -> List[VideoFile]:
        """Return videos for one camera, optionally filtered by date/limit.

        Returns an empty list on any error.
        """
        try:
            files = self.storage_manager.get_recording_files(
                camera_name=camera_name,
                start_date=start_date,
                end_date=end_date,
                limit=limit
            )
            return [self._convert_to_video_file(file_info) for file_info in files]
        except Exception as e:
            self.logger.error(f"Error getting videos for camera {camera_name}: {e}")
            return []

    async def get_all(
        self,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
        limit: Optional[int] = None
    ) -> List[VideoFile]:
        """Return videos across all cameras, optionally filtered.

        Returns an empty list on any error.
        """
        try:
            files = self.storage_manager.get_recording_files(
                camera_name=None,  # None means all cameras
                start_date=start_date,
                end_date=end_date,
                limit=limit
            )
            return [self._convert_to_video_file(file_info) for file_info in files]
        except Exception as e:
            self.logger.error(f"Error getting all videos: {e}")
            return []

    async def exists(self, file_id: str) -> bool:
        """True when the video is known AND its file is present on disk."""
        try:
            video_file = await self.get_by_id(file_id)
            return video_file is not None and video_file.file_path.exists()
        except Exception as e:
            self.logger.error(f"Error checking if video exists {file_id}: {e}")
            return False

    async def get_file_stream(self, video_file: VideoFile) -> BinaryIO:
        """Open and return an async binary handle for the video file.

        The caller owns the handle and must close it.  Raises on failure
        after logging the error.
        """
        try:
            return await aiofiles.open(video_file.file_path, 'rb')
        except Exception as e:
            self.logger.error(f"Error opening file stream for {video_file.file_id}: {e}")
            raise

    async def get_file_range(
        self,
        video_file: VideoFile,
        range_request: StreamRange
    ) -> bytes:
        """Read bytes [start, end] (inclusive) from the video file.

        When ``range_request.end`` is None, reads from ``start`` to EOF.
        Raises on I/O failure after logging the error.
        """
        try:
            async with aiofiles.open(video_file.file_path, 'rb') as f:
                await f.seek(range_request.start)
                if range_request.end is not None:
                    # HTTP-style inclusive range: end - start + 1 bytes.
                    bytes_to_read = range_request.end - range_request.start + 1
                    data = await f.read(bytes_to_read)
                else:
                    data = await f.read()
                return data
        except Exception as e:
            self.logger.error(f"Error reading file range for {video_file.file_id}: {e}")
            raise

    # Extension -> domain format; unknown extensions fall back to AVI.
    _FORMAT_BY_EXTENSION = {
        'avi': VideoFormat.AVI,
        'mp4': VideoFormat.MP4,
        'webm': VideoFormat.WEBM,
    }

    def _convert_to_video_file(self, file_info: dict) -> VideoFile:
        """Convert storage-manager file info (dict) into a VideoFile model.

        Re-raises after logging so callers can decide how to handle it.
        """
        try:
            file_path = Path(file_info["filename"])
            # Determine video format from the extension (avoid shadowing
            # the builtin name ``format``).
            extension = file_path.suffix.lower().lstrip('.')
            video_format = self._FORMAT_BY_EXTENSION.get(extension, VideoFormat.AVI)
            # Map the raw status string onto the enum; unknown -> UNKNOWN.
            status_str = file_info.get("status", "unknown")
            try:
                status = VideoStatus(status_str)
            except ValueError:
                status = VideoStatus.UNKNOWN
            # Timestamps arrive as ISO-8601 strings when present.
            start_time = None
            if file_info.get("start_time"):
                start_time = datetime.fromisoformat(file_info["start_time"])
            end_time = None
            if file_info.get("end_time"):
                end_time = datetime.fromisoformat(file_info["end_time"])
            created_at = start_time or datetime.now()
            return VideoFile(
                file_id=file_info["file_id"],
                camera_name=file_info["camera_name"],
                filename=file_info["filename"],
                file_path=file_path,
                file_size_bytes=file_info.get("file_size_bytes", 0),
                created_at=created_at,
                status=status,
                format=video_format,
                start_time=start_time,
                end_time=end_time,
                machine_trigger=file_info.get("machine_trigger"),
                error_message=None  # Could be added to storage manager later
            )
        except Exception as e:
            self.logger.error(f"Error converting file info to VideoFile: {e}")
            raise