feat: Export NixOS Module for usda-vision service config
@@ -79,13 +79,6 @@ Secrets are managed by ragenix in the athenix flake:
    owner = "root";
    group = "root";
  };

  age.secrets.usda-vision-azure-env = {
    file = ./secrets/usda-vision/azure-env.age;
    mode = "0644";
    owner = "root";
    group = "root";
  };
}
```

@@ -108,6 +101,12 @@ Simply import the default.nix - it will access inputs and age secrets automatica
    file = ./secrets/usda-vision/env.age;
    mode = "0644";
  };

  # Optional: Configure hostname replacement (defaults shown)
  services.usda-vision = {
    hostname = "192.168.1.156"; # Default: 192.168.1.156
    replaceHostnames = true;    # Default: true - replaces exp-dash/localhost
  };
}
```

@@ -126,9 +125,17 @@ in
  imports = [
    "${usda-dash-config}/default.nix"
  ];

  # Set a custom hostname for your deployment
  services.usda-vision.hostname = "my-custom-host.local";
}
```

### Module Options

- `services.usda-vision.hostname` (string, default: `"192.168.1.156"`) - Hostname or IP to replace exp-dash/localhost with
- `services.usda-vision.replaceHostnames` (bool, default: `true`) - Whether to perform hostname replacement in docker-compose.yml

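For instance, to keep the upstream exp-dash/localhost entries untouched, the replacement can be switched off entirely (a minimal sketch using only the option names documented above):

```
{
  # Leave docker-compose.yml hostnames as shipped; no replacement is performed
  services.usda-vision.replaceHostnames = false;
}
```
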
## Complete Example

Here's a complete example of how it all fits together:

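The full example sits outside this diff hunk; as a rough, hypothetical sketch (not the repository's actual example) of how the pieces shown above could combine — module import, ragenix-managed secret, and the service options:

```
{ config, inputs, ... }:
{
  # Import the usda-vision NixOS module from the flake input
  imports = [ inputs.usda-vision.nixosModules.default ];

  # ragenix-managed environment file
  age.secrets.usda-vision-env.file = ./secrets/usda-vision/env.age;

  # Enable the service and wire in the decrypted secret path
  services.usda-vision = {
    enable = true;
    hostname = "192.168.1.156";
    replaceHostnames = true;
    envFile = config.age.secrets.usda-vision-env.path or null;
  };
}
```
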
default.nix (257 changed lines)
@@ -7,15 +7,9 @@
#
# 1. Add required flake inputs in athenix/flake.nix:
#
#    inputs = {
#      usda-vision = {
#        url = "git+https://git.factory.uga.edu/MODEL/usda-vision.git";
#        inputs.nixpkgs.follows = "nixpkgs";
#      };
#      ragenix = {
#        url = "github:yaxitech/ragenix";
#        inputs.nixpkgs.follows = "nixpkgs";
#      };
#    inputs.usda-vision = {
#      url = "git+https://git.factory.uga.edu/MODEL/usda-vision.git";
#      inputs.nixpkgs.follows = "nixpkgs";
#    };
#
# 2. Pass inputs to modules via specialArgs:
@@ -27,239 +21,78 @@
#      ];
#    };
#
# 3. Configure secrets in your athenix configuration:
# 3. Configure secrets in athenix:
#
#    age.secrets.usda-vision-env = {
#      file = ./secrets/usda-vision/env.age;
#    };
{ inputs, ... }:
{ config
, lib
, pkgs
, ...
}:
#    age.secrets.usda-vision-env.file = ./secrets/usda-vision/env.age;

let
  # Get packages from flake inputs
  camera-sdk = inputs.usda-vision.packages.${pkgs.system}.camera-sdk;
  usda-vision-app = inputs.usda-vision.packages.${pkgs.system}.usda-vision;

  # Get secret paths from age configuration (if configured)
  envFile = config.age.secrets.usda-vision-env.path or null;
in
{ inputs, ... }:
{ config, lib, pkgs, ... }:

{
  # ========== Module Configuration ==========
  imports = [
    # Import the usda-vision NixOS module
    inputs.usda-vision.nixosModules.default
  ];

  config = {
    # Nix configuration for LXC container without sandbox support
    # Enable and configure usda-vision
    services.usda-vision = {
      enable = true;
      package = inputs.usda-vision.packages.${pkgs.system};
      hostname = "192.168.1.156";
      replaceHostnames = true;
      envFile = config.age.secrets.usda-vision-env.path or null;
    };

    # Nix configuration for LXC container
    nix.settings = {
      sandbox = false; # LXC containers don't support kernel namespaces for sandboxing
      sandbox = false;
      experimental-features = [ "nix-command" "flakes" ];
    };

    # System packages specific to usda-dash

    # LXC-specific settings for nested containers
    boot.kernel.sysctl = {
      "net.ipv4.ip_forward" = 1;
      "net.ipv4.conf.all.forwarding" = 1;
    };

    # Configure users
    athenix.users.sv22900.enable = true;
    users.users.sv22900.extraGroups = [ "docker" ];
    users.users.engr-ugaif.extraGroups = [ "docker" ];

    # Additional system packages
    environment.systemPackages = with pkgs; [
      # Core tools
      git
      vim
      htop
      curl
      wget
      nfs-utils

      # Docker and Docker Compose for running usda-vision
      docker
      docker-compose

      # Supabase
      supabase-cli

      # Camera SDK
      camera-sdk

      # USDA Vision application package with convenience scripts
      usda-vision-app
    ];

    # Make camera SDK libraries available system-wide
    environment.variables = {
      LD_LIBRARY_PATH = "${camera-sdk}/lib";
    };

    # Enable Docker service with LXC-compatible settings
    virtualisation.docker = {
      enable = true;
      autoPrune.enable = true;
      # Enable experimental features for better LXC compatibility
      daemon.settings = {
        experimental = true;
      };
    };

    # LXC-specific settings for nested containers
    boot.kernel.sysctl = {
      # Required for Docker networking in LXC
      "net.ipv4.ip_forward" = 1;
      "net.ipv4.conf.all.forwarding" = 1;
    };

    # Configure users
    athenix.users.sv22900.enable = true;

    # Add users to docker group
    users.users.sv22900.extraGroups = [ "docker" ];
    users.users.engr-ugaif.extraGroups = [ "docker" ];

    # Create persistent directories and .env file location
    systemd.tmpfiles.rules = [
      "d /var/lib/usda-vision 0755 root root -"
      "f /var/lib/usda-vision/.env 0644 root root -"
      "d /var/lib/supabase 0755 root root -"
      "d /mnt/nfs_share 0755 root root -"
    ];

    # Enable NFS client support
    # NFS mount
    services.rpcbind.enable = true;

    # NFS mount for shared storage
    fileSystems."/mnt/nfs_share" = {
      device = "192.168.1.249:/mnt/nfs_share";
      fsType = "nfs";
      options = [ "nfsvers=4" "rw" "soft" "_netdev" ];
    };

    # Supabase CLI configuration - runs in writable directory
    systemd.services.supabase-cli = {
      enable = true;
      description = "Supabase CLI Service";

      preStart = ''
        # Clean slate - remove old content but keep the directory
        rm -rf /var/lib/supabase/*
        rm -rf /var/lib/supabase/.* 2>/dev/null || true

        # Copy supabase directory structure from the app
        if [ -d ${usda-vision-app}/opt/usda-vision/supabase ]; then
          ${pkgs.rsync}/bin/rsync -av ${usda-vision-app}/opt/usda-vision/supabase/ /var/lib/supabase/supabase/
        fi

        # Create necessary directories for supabase
        mkdir -p /var/lib/supabase/supabase/.branches
        chmod -R 755 /var/lib/supabase
      '';

      serviceConfig = {
        WorkingDirectory = "/var/lib/supabase";
        EnvironmentFile = envFile;
        ExecStart = "${pkgs.supabase-cli}/bin/supabase start";
        ExecStop = "${pkgs.supabase-cli}/bin/supabase stop";
        Type = "oneshot";
        RemainAfterExit = true;
        User = "root";
        Group = "root";
      };
    };
    systemd.tmpfiles.rules = [
      "d /mnt/nfs_share 0755 root root -"
    ];

    # Systemd service to manage usda-vision docker compose
    systemd.services.usda-vision = {
      description = "USDA Vision Docker Compose Stack";
      after = [ "docker.service" "network-online.target" "systemd-tmpfiles-setup.service" ];
      wants = [ "network-online.target" ];
      wantedBy = [ "multi-user.target" ];

      # Only start if .env file exists (will be managed by ragenix)
      unitConfig = lib.mkIf (envFile != null) {
        ConditionPathExists = envFile;
      };

      preStart = ''
        # Copy application code to writable directory if not already present or if source is newer
        echo "Syncing application code to /var/lib/usda-vision..."
        ${pkgs.rsync}/bin/rsync -av --delete \
          --checksum \
          --exclude='node_modules' \
          --exclude='.env' \
          --exclude='.env.azure' \
          --exclude='__pycache__' \
          --exclude='.venv' \
          ${usda-vision-app}/opt/usda-vision/ /var/lib/usda-vision/

        # Copy ragenix-managed secrets to working directory
        ${lib.optionalString (envFile != null) ''
          echo "Copying environment file from ragenix-managed secret..."
          cp ${envFile} /var/lib/usda-vision/.env
          chmod 644 /var/lib/usda-vision/.env
        ''}

        # Fallback: use example file if no secrets provided
        ${lib.optionalString (envFile == null) ''
          if [ ! -s /var/lib/usda-vision/.env ]; then
            if [ -f ${usda-vision-app}/opt/usda-vision/.env.example ]; then
              echo "WARNING: No ragenix-managed secrets provided, using .env.example"
              echo "Please configure secrets in athenix using ragenix"
              cp ${usda-vision-app}/opt/usda-vision/.env.example /var/lib/usda-vision/.env
            fi
          fi
        ''}
      '';

      serviceConfig = {
        Type = "oneshot";
        RemainAfterExit = true;
        WorkingDirectory = "/var/lib/usda-vision";
        User = "root";
        Group = "root";

        # Start: pull latest images and start containers from writable directory
        ExecStart = "${pkgs.docker-compose}/bin/docker-compose -f /var/lib/usda-vision/docker-compose.yml up -d --build";

        # Stop: gracefully stop containers
        ExecStop = "${pkgs.docker-compose}/bin/docker-compose -f /var/lib/usda-vision/docker-compose.yml down";

        # Reload: restart containers
        ExecReload = "${pkgs.bash}/bin/bash -c '${pkgs.docker-compose}/bin/docker-compose -f /var/lib/usda-vision/docker-compose.yml down && ${pkgs.docker-compose}/bin/docker-compose -f /var/lib/usda-vision/docker-compose.yml up -d --build'";

        TimeoutStartSec = 300;
        TimeoutStopSec = 120;
      };
    };

    # Firewall configuration - open ports for USDA Vision services
    # Firewall configuration
    networking.firewall = {
      enable = false;
      allowedTCPPorts = [
        # Web services
        80    # HTTP
        443   # HTTPS
        3000  # Main web app (if exposed directly)
        3001  # Vision video remote web app
        3002  # Vision system remote web app
        3003  # Scheduling remote web app
        4000  # Analytics service web app

        # Supabase services
        54321 # Supabase Kong (API Gateway)
        54322 # Supabase PostgreSQL
        54323 # Supabase Studio
        54324 # Supabase Inbucket (email testing)
        54327 # Supabase Analytics

        # USDA Vision services
        8000  # Camera Management API
        8025  # Mailpit (email testing)
        8090  # Media API
        8189  # MediaMTX API
        8554  # RTSP (MediaMTX)
        8889  # MediaMTX WebRTC
        80 443 3000 3001 3002 3003 4000
        54321 54322 54323 54324 54327
        8000 8025 8090 8189 8554 8889
      ];

      allowedUDPPorts = [
        3956 # GigE Vision Control Protocol (GVCP)
      ];

      allowedUDPPorts = [ 3956 ];
      allowPing = true;
    };

    # Any other usda-dash specific configuration
  };
}

Submodule usda-vision updated: 20a01c89af...dce72a6ab9