feat(platform): add new services, tools, tests and crews modules
New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
This commit is contained in:
19
services/aurora-service/Dockerfile
Normal file
19
services/aurora-service/Dockerfile
Normal file
@@ -0,0 +1,19 @@
|
||||
# Aurora media forensics service image (FastAPI app served by uvicorn).
FROM python:3.11-slim

WORKDIR /app

# ffmpeg: used for media probing/processing; libgl1 + libglib2.0-0 are the
# usual runtime libraries required by opencv-python-headless wheels.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ffmpeg libgl1 libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies before copying the app so this layer is cached
# across code-only changes.
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

COPY app/ ./app/

EXPOSE 9401

# Liveness probe against the service's /health endpoint; urllib raises on
# non-success, failing the check.
HEALTHCHECK --interval=30s --timeout=10s --start-period=20s --retries=5 \
    CMD python -c "import urllib.request; urllib.request.urlopen('http://localhost:9401/health')"

CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "9401"]
|
||||
1
services/aurora-service/app/__init__.py
Normal file
1
services/aurora-service/app/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
"""Aurora media forensics service package."""
|
||||
417
services/aurora-service/app/analysis.py
Normal file
417
services/aurora-service/app/analysis.py
Normal file
@@ -0,0 +1,417 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import math
|
||||
import statistics
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
# OpenCV is an optional dependency at import time: when it is missing, the
# detector helpers below return [] and quality analysis reports "unknown",
# while the public analyze_* entry points raise RuntimeError explicitly.
try:
    import cv2  # type: ignore[import-untyped]
except Exception:  # pragma: no cover
    cv2 = None
|
||||
|
||||
|
||||
def _safe_float(value: Any, default: float = 0.0) -> float:
|
||||
try:
|
||||
return float(value)
|
||||
except Exception:
|
||||
return default
|
||||
|
||||
|
||||
def _safe_int(value: Any, default: int = 0) -> int:
|
||||
try:
|
||||
return int(float(value))
|
||||
except Exception:
|
||||
return default
|
||||
|
||||
|
||||
def _iso_clamp(v: int, lo: int, hi: int) -> int:
|
||||
return max(lo, min(hi, v))
|
||||
|
||||
|
||||
def _detect_faces(gray_img) -> List[Dict[str, Any]]:
    """Detect frontal faces in a grayscale image via OpenCV's Haar cascade.

    Returns a list of ``{"bbox": [x, y, w, h], "confidence": 0.75}`` dicts;
    the confidence is a fixed heuristic value (Haar cascades report no score).
    Returns [] when OpenCV or the bundled cascade is unavailable.
    """
    if cv2 is None:
        return []
    cascade_file = Path(cv2.data.haarcascades) / "haarcascade_frontalface_default.xml"
    classifier = cv2.CascadeClassifier(str(cascade_file))
    if classifier.empty():
        return []
    detections = classifier.detectMultiScale(
        gray_img,
        scaleFactor=1.1,
        minNeighbors=4,
        minSize=(20, 20),
    )
    return [
        {"bbox": [int(x), int(y), int(w), int(h)], "confidence": 0.75}
        for x, y, w, h in detections
    ]
|
||||
|
||||
|
||||
def _detect_plates(gray_img) -> List[Dict[str, Any]]:
    """Detect license plates in a grayscale image via a Haar cascade.

    Returns ``{"bbox": [x, y, w, h], "confidence": 0.65, "text": None}`` per
    detection (confidence is a fixed heuristic; OCR is not performed here).
    Returns [] when OpenCV or the plate cascade file is unavailable — unlike
    the face cascade, this one is not guaranteed to ship with every build.
    """
    if cv2 is None:
        return []
    cascade_file = Path(cv2.data.haarcascades) / "haarcascade_russian_plate_number.xml"
    if not cascade_file.exists():
        return []
    classifier = cv2.CascadeClassifier(str(cascade_file))
    if classifier.empty():
        return []
    detections = classifier.detectMultiScale(
        gray_img,
        scaleFactor=1.1,
        minNeighbors=3,
        minSize=(28, 10),
    )
    return [
        {"bbox": [int(x), int(y), int(w), int(h)], "confidence": 0.65, "text": None}
        for x, y, w, h in detections
    ]
|
||||
|
||||
|
||||
def _noise_label(noise_sigma: float) -> str:
|
||||
if noise_sigma >= 28:
|
||||
return "high"
|
||||
if noise_sigma >= 14:
|
||||
return "medium"
|
||||
return "low"
|
||||
|
||||
|
||||
def _brightness_label(brightness: float) -> str:
|
||||
if brightness < 75:
|
||||
return "low"
|
||||
if brightness > 180:
|
||||
return "high"
|
||||
return "medium"
|
||||
|
||||
|
||||
def _blur_label(laplacian_var: float) -> str:
|
||||
if laplacian_var < 45:
|
||||
return "high"
|
||||
if laplacian_var < 120:
|
||||
return "medium"
|
||||
return "low"
|
||||
|
||||
|
||||
def _analyze_quality(gray_img) -> Dict[str, Any]:
    """Measure brightness, noise and sharpness of a grayscale frame.

    Returns both the bucketed labels and the raw rounded values; every field
    is "unknown"/None when OpenCV is unavailable.
    """
    if cv2 is None:
        return {
            "noise_level": "unknown",
            "brightness": "unknown",
            "blur_level": "unknown",
            "brightness_value": None,
            "noise_sigma": None,
            "laplacian_var": None,
        }
    mean_value = float(gray_img.mean())
    std_value = float(gray_img.std())
    # Variance of the Laplacian is a standard single-number sharpness proxy.
    sharpness = float(cv2.Laplacian(gray_img, cv2.CV_64F).var())
    return {
        "noise_level": _noise_label(std_value),
        "brightness": _brightness_label(mean_value),
        "blur_level": _blur_label(sharpness),
        "brightness_value": round(mean_value, 2),
        "noise_sigma": round(std_value, 2),
        "laplacian_var": round(sharpness, 2),
    }
|
||||
|
||||
|
||||
def _aggregate_quality(samples: List[Dict[str, Any]]) -> Dict[str, Any]:
|
||||
if not samples:
|
||||
return {
|
||||
"noise_level": "unknown",
|
||||
"brightness": "unknown",
|
||||
"blur_level": "unknown",
|
||||
"brightness_value": None,
|
||||
"noise_sigma": None,
|
||||
"laplacian_var": None,
|
||||
}
|
||||
brightness_values = [float(s["brightness_value"]) for s in samples if s.get("brightness_value") is not None]
|
||||
noise_values = [float(s["noise_sigma"]) for s in samples if s.get("noise_sigma") is not None]
|
||||
lap_values = [float(s["laplacian_var"]) for s in samples if s.get("laplacian_var") is not None]
|
||||
brightness = statistics.mean(brightness_values) if brightness_values else 0.0
|
||||
noise_sigma = statistics.mean(noise_values) if noise_values else 0.0
|
||||
lap_var = statistics.mean(lap_values) if lap_values else 0.0
|
||||
return {
|
||||
"noise_level": _noise_label(noise_sigma),
|
||||
"brightness": _brightness_label(brightness),
|
||||
"blur_level": _blur_label(lap_var),
|
||||
"brightness_value": round(brightness, 2),
|
||||
"noise_sigma": round(noise_sigma, 2),
|
||||
"laplacian_var": round(lap_var, 2),
|
||||
}
|
||||
|
||||
|
||||
def probe_video_metadata(path: Path) -> Dict[str, Any]:
    """Probe *path* with ffprobe and return best-effort video metadata.

    Returns a dict with ``width``, ``height``, ``fps``, ``frame_count`` and
    ``duration_seconds`` (unknown values become None/0), or an empty dict when
    ffprobe is unavailable, exits non-zero, or emits unusable output.
    Never raises.
    """
    cmd = [
        "ffprobe",
        "-v",
        "error",
        "-select_streams",
        "v:0",
        "-show_entries",
        "stream=width,height,nb_frames,r_frame_rate,duration",
        "-show_entries",
        "format=duration",
        "-of",
        "json",
        str(path),
    ]
    try:
        p = subprocess.run(cmd, check=False, capture_output=True, text=True)
        if p.returncode != 0 or not p.stdout:
            return {}
        payload = json.loads(p.stdout)
    except Exception:
        # ffprobe binary missing, OS-level failure, or invalid JSON output.
        return {}

    # Bug fix: the previous code only guarded the "streams" lookup with
    # isinstance(payload, dict); payload.get("format") below would raise
    # AttributeError on a non-dict JSON payload. Bail out early instead.
    if not isinstance(payload, dict):
        return {}
    stream = (payload.get("streams") or [{}])[0]
    fmt = payload.get("format") or {}
    width = _safe_int(stream.get("width"))
    height = _safe_int(stream.get("height"))
    nb_frames = _safe_int(stream.get("nb_frames"))
    fps_raw = str(stream.get("r_frame_rate") or "0/1")
    # Stream-level duration wins; container (format) duration is the fallback.
    duration = _safe_float(stream.get("duration")) or _safe_float(fmt.get("duration"))
    fps = 0.0
    if "/" in fps_raw:
        # r_frame_rate is reported as a rational like "30000/1001".
        num_s, den_s = fps_raw.split("/", 1)
        num = _safe_float(num_s)
        den = _safe_float(den_s, 1.0)
        if den > 0:
            fps = num / den
    elif fps_raw:
        fps = _safe_float(fps_raw)
    if nb_frames <= 0 and duration > 0 and fps > 0:
        # Some containers omit nb_frames; derive it from duration * fps.
        nb_frames = int(duration * fps)
    return {
        "width": width,
        "height": height,
        "fps": round(fps, 3) if fps > 0 else None,
        "frame_count": nb_frames if nb_frames > 0 else None,
        "duration_seconds": round(duration, 3) if duration > 0 else None,
    }
|
||||
|
||||
|
||||
def estimate_processing_seconds(
    *,
    media_type: str,
    mode: str,
    width: int = 0,
    height: int = 0,
    frame_count: int = 0,
) -> Optional[int]:
    """Rough wall-clock estimate (seconds) for processing a media item.

    "tactical" mode is the faster profile; any other mode uses the heavier
    coefficients. Returns None for unknown media types or a video with no
    known frame count.
    """
    tactical = mode == "tactical"
    # Effective megapixels, floored at 0.15 so tiny/unknown frames still cost something.
    megapixels = max(0.15, (max(1, width) * max(1, height)) / 1_000_000.0)

    if media_type == "video":
        if frame_count <= 0:
            return None
        per_frame = megapixels * (0.8 if tactical else 1.35)
        per_frame = min(9.0, max(0.08, per_frame))  # keep per-frame cost sane
        overhead = 6 if tactical else 12
        return int(math.ceil(frame_count * per_frame + overhead))

    if media_type == "photo":
        base = 3.0 if tactical else 6.0
        scale = 3.0 if tactical else 5.0
        return int(math.ceil(base + megapixels * scale))

    return None
|
||||
|
||||
|
||||
def _recommendations(
|
||||
*,
|
||||
faces_count: int,
|
||||
plates_count: int,
|
||||
quality: Dict[str, Any],
|
||||
media_type: str,
|
||||
) -> Tuple[List[str], str]:
|
||||
recs: List[str] = []
|
||||
noise_level = quality.get("noise_level")
|
||||
brightness = quality.get("brightness")
|
||||
blur_level = quality.get("blur_level")
|
||||
|
||||
if noise_level == "high":
|
||||
recs.append("Enable denoise (FastDVDnet/SCUNet) before enhancement.")
|
||||
if brightness == "low":
|
||||
recs.append("Apply low-light normalization before super-resolution.")
|
||||
if blur_level in {"medium", "high"}:
|
||||
recs.append("Enable sharpening after upscaling to recover edges.")
|
||||
if faces_count > 0:
|
||||
recs.append("Run face restoration (GFPGAN) as priority stage.")
|
||||
if plates_count > 0:
|
||||
recs.append("Run license-plate ROI enhancement with focused sharpening.")
|
||||
if not recs:
|
||||
recs.append("Balanced enhancement pipeline is sufficient for this media.")
|
||||
|
||||
if faces_count > 0 and faces_count >= plates_count:
|
||||
priority = "faces"
|
||||
elif plates_count > 0:
|
||||
priority = "plates"
|
||||
elif media_type == "photo":
|
||||
priority = "details"
|
||||
else:
|
||||
priority = "balanced"
|
||||
return recs, priority
|
||||
|
||||
|
||||
def _suggested_export(media_type: str, quality: Dict[str, Any], width: int, height: int) -> Dict[str, Any]:
|
||||
if media_type == "video":
|
||||
if width >= 3840 or height >= 2160:
|
||||
resolution = "original"
|
||||
elif width >= 1920 or height >= 1080:
|
||||
resolution = "4k"
|
||||
else:
|
||||
resolution = "1080p"
|
||||
codec = "mp4_h264" if quality.get("noise_level") != "high" else "mp4_h265"
|
||||
return {
|
||||
"resolution": resolution,
|
||||
"format": codec,
|
||||
"roi": "auto_faces",
|
||||
}
|
||||
return {
|
||||
"resolution": "original",
|
||||
"format": "png",
|
||||
"roi": "full_frame",
|
||||
}
|
||||
|
||||
|
||||
def analyze_photo(path: Path) -> Dict[str, Any]:
    """Run the single-frame analysis pipeline on a still image.

    Detects faces and plates, measures quality, and derives recommendations,
    a priority label, export defaults and a processing-time estimate.

    Raises:
        RuntimeError: when OpenCV is missing or the image cannot be decoded.
    """
    if cv2 is None:
        raise RuntimeError("opencv-python-headless is not installed")
    image = cv2.imread(str(path), cv2.IMREAD_COLOR)
    if image is None:
        raise RuntimeError("Cannot decode uploaded image")
    height, width = image.shape[:2]
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    faces = _detect_faces(gray)
    plates = _detect_plates(gray)
    quality = _analyze_quality(gray)
    advice, priority = _recommendations(
        faces_count=len(faces),
        plates_count=len(plates),
        quality=quality,
        media_type="photo",
    )

    report: Dict[str, Any] = {
        "media_type": "photo",
        "frame_sampled": 1,
        "resolution": {"width": width, "height": height},
        "faces": faces,
        "license_plates": plates,
        "quality_analysis": quality,
        "recommendations": advice,
        "suggested_priority": priority,
        "suggested_export": _suggested_export("photo", quality, width, height),
        "estimated_processing_seconds": estimate_processing_seconds(
            media_type="photo",
            mode="tactical",
            width=width,
            height=height,
            frame_count=1,
        ),
    }
    return report
|
||||
|
||||
|
||||
def _sample_video_frames(path: Path, max_samples: int = 24) -> Tuple[List[Tuple[int, Any]], Dict[str, Any]]:
    """Grab up to *max_samples* frames spread across the video plus stream metadata.

    Returns ``(sampled, meta)`` where *sampled* is a list of
    ``(frame_index, frame)`` pairs and *meta* carries frame_count/fps/size/
    duration (None where the container does not report them).

    Raises:
        RuntimeError: when OpenCV is missing or the file cannot be opened.
    """
    if cv2 is None:
        raise RuntimeError("opencv-python-headless is not installed")
    cap = cv2.VideoCapture(str(path))
    if not cap.isOpened():
        raise RuntimeError("Cannot open uploaded video")
    frame_count = _safe_int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = _safe_float(cap.get(cv2.CAP_PROP_FPS))
    width = _safe_int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = _safe_int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    indices: List[int] = []
    if frame_count > 0:
        # Known length: pick evenly spaced indices from first to last frame
        # (set-dedup handles short clips where indices would collide).
        sample_count = min(max_samples, frame_count)
        if sample_count <= 1:
            indices = [0]
        else:
            indices = sorted({int(i * (frame_count - 1) / (sample_count - 1)) for i in range(sample_count)})
    else:
        # Unknown length: just read the first max_samples frames sequentially.
        indices = list(range(max_samples))

    sampled: List[Tuple[int, Any]] = []
    for idx in indices:
        if frame_count > 0:
            # Seek only when the container reports a length; otherwise each
            # cap.read() below advances sequentially, matching the indices.
            cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ok, frame = cap.read()
        if not ok or frame is None:
            continue  # skip unreadable frames rather than aborting the sweep
        sampled.append((idx, frame))

    # NOTE(review): cap is not released if a read raises mid-loop — consider
    # wrapping the sampling in try/finally.
    cap.release()
    duration = (frame_count / fps) if (frame_count > 0 and fps > 0) else None
    meta = {
        "frame_count": frame_count if frame_count > 0 else None,
        "fps": round(fps, 3) if fps > 0 else None,
        "width": width,
        "height": height,
        "duration_seconds": round(duration, 3) if duration else None,
    }
    return sampled, meta
|
||||
|
||||
|
||||
def analyze_video(path: Path) -> Dict[str, Any]:
    """Run the sampled-frame analysis pipeline on a video.

    Samples up to 24 frames, runs face/plate detection and quality analysis
    per frame, aggregates the quality metrics, and derives recommendations,
    export defaults and a processing-time estimate.

    Raises:
        RuntimeError: when no frames can be sampled from the file.
    """
    sampled, meta = _sample_video_frames(path, max_samples=24)
    if not sampled:
        raise RuntimeError("Cannot sample frames from uploaded video")

    faces_found: List[Dict[str, Any]] = []
    plates_found: List[Dict[str, Any]] = []
    per_frame_quality: List[Dict[str, Any]] = []

    for frame_idx, frame in sampled:
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # type: ignore[union-attr]
        # Tag every detection with the frame it came from.
        for detection in _detect_faces(gray):
            detection["frame_index"] = frame_idx
            faces_found.append(detection)
        for detection in _detect_plates(gray):
            detection["frame_index"] = frame_idx
            plates_found.append(detection)
        per_frame_quality.append(_analyze_quality(gray))

    quality = _aggregate_quality(per_frame_quality)
    advice, priority = _recommendations(
        faces_count=len(faces_found),
        plates_count=len(plates_found),
        quality=quality,
        media_type="video",
    )
    width = _safe_int(meta.get("width"))
    height = _safe_int(meta.get("height"))
    frame_count = _safe_int(meta.get("frame_count"))

    return {
        "media_type": "video",
        "frame_sampled": len(sampled),
        "video_metadata": meta,
        # Cap detection lists to keep the response payload bounded.
        "faces": faces_found[:120],
        "license_plates": plates_found[:120],
        "quality_analysis": quality,
        "recommendations": advice,
        "suggested_priority": priority,
        "suggested_export": _suggested_export("video", quality, width, height),
        "estimated_processing_seconds": estimate_processing_seconds(
            media_type="video",
            mode="tactical",
            width=width,
            height=height,
            frame_count=frame_count,
        ),
    }
|
||||
|
||||
254
services/aurora-service/app/job_store.py
Normal file
254
services/aurora-service/app/job_store.py
Normal file
@@ -0,0 +1,254 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import shutil
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from .schemas import AuroraJob, AuroraResult, AuroraMode, JobStatus, MediaType, ProcessingStep
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _model_dump(model: Any) -> Dict[str, Any]:
|
||||
if hasattr(model, "model_dump"):
|
||||
return model.model_dump()
|
||||
return model.dict()
|
||||
|
||||
|
||||
class JobStore:
    """Thread-safe, file-backed registry of Aurora jobs.

    Every job is persisted as ``<data_dir>/jobs/<job_id>.json`` so state
    survives restarts; an in-memory dict serves reads. All mutation goes
    through an RLock, and every write re-validates the record through the
    AuroraJob model before saving.
    """

    def __init__(self, data_dir: Path) -> None:
        self.data_dir = data_dir
        self.jobs_dir = data_dir / "jobs"
        self.uploads_dir = data_dir / "uploads"
        self.outputs_dir = data_dir / "outputs"

        self.jobs_dir.mkdir(parents=True, exist_ok=True)
        self.uploads_dir.mkdir(parents=True, exist_ok=True)
        self.outputs_dir.mkdir(parents=True, exist_ok=True)

        # RLock: public methods hold it while calling helpers that may lock again.
        self._lock = threading.RLock()
        self._jobs: Dict[str, AuroraJob] = {}
        self._load_existing_jobs()

    def _job_path(self, job_id: str) -> Path:
        """Path of the JSON file that persists *job_id*."""
        return self.jobs_dir / f"{job_id}.json"

    def _save_job(self, job: AuroraJob) -> None:
        """Persist *job* to disk, overwriting any previous snapshot."""
        self._job_path(job.job_id).write_text(
            json.dumps(_model_dump(job), ensure_ascii=False, indent=2),
            encoding="utf-8",
        )

    def _load_existing_jobs(self) -> None:
        """Rehydrate the in-memory map from persisted JSON, skipping corrupt files."""
        for path in sorted(self.jobs_dir.glob("*.json")):
            try:
                payload = json.loads(path.read_text(encoding="utf-8"))
                job = AuroraJob(**payload)
                self._jobs[job.job_id] = job
            except Exception as exc:
                # Best-effort startup: a broken file must not block the service.
                logger.warning("Skipping unreadable job file %s: %s", path, exc)

    def create_job(
        self,
        *,
        job_id: str,
        file_name: str,
        input_path: Path,
        input_hash: str,
        mode: AuroraMode,
        media_type: MediaType,
        created_at: str,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> AuroraJob:
        """Create, register and persist a new job record; returns it."""
        job = AuroraJob(
            job_id=job_id,
            file_name=file_name,
            mode=mode,
            media_type=media_type,
            input_path=str(input_path),
            input_hash=input_hash,
            created_at=created_at,
            metadata=metadata or {},
        )
        with self._lock:
            self._jobs[job_id] = job
            self._save_job(job)
        return job

    def get_job(self, job_id: str) -> Optional[AuroraJob]:
        """Return the job with *job_id*, or None if unknown."""
        with self._lock:
            return self._jobs.get(job_id)

    def list_jobs(self) -> List[AuroraJob]:
        """Return a snapshot list of all jobs (safe to iterate without the lock)."""
        with self._lock:
            return list(self._jobs.values())

    def patch_job(self, job_id: str, **changes: Any) -> AuroraJob:
        """Apply field *changes* to a job, revalidate, persist and return it.

        Raises:
            KeyError: when *job_id* is unknown.
        """
        with self._lock:
            current = self._jobs.get(job_id)
            if not current:
                raise KeyError(job_id)
            payload = _model_dump(current)
            payload.update(changes)
            # The identity field can never be patched away.
            payload["job_id"] = job_id
            updated = AuroraJob(**payload)
            self._jobs[job_id] = updated
            self._save_job(updated)
            return updated

    def append_processing_step(self, job_id: str, step: ProcessingStep) -> AuroraJob:
        """Append *step* to the job's processing log and persist.

        NOTE(review): get_job and patch_job are two separate lock
        acquisitions, so two concurrent appends could lose a step —
        confirm callers serialize per job.
        """
        job = self.get_job(job_id)
        if not job:
            raise KeyError(job_id)
        steps = list(job.processing_log)
        steps.append(step)
        return self.patch_job(job_id, processing_log=steps)

    def set_progress(self, job_id: str, *, progress: int, current_stage: str) -> AuroraJob:
        """Update progress (clamped to 0..100) and the current stage label."""
        bounded = max(0, min(100, int(progress)))
        return self.patch_job(job_id, progress=bounded, current_stage=current_stage)

    def mark_processing(self, job_id: str, *, started_at: str) -> AuroraJob:
        """Transition a job to the processing state and clear any old error."""
        return self.patch_job(
            job_id,
            status="processing",
            progress=1,
            current_stage="dispatching",
            started_at=started_at,
            error_message=None,
        )

    def mark_completed(self, job_id: str, *, result: AuroraResult, completed_at: str) -> AuroraJob:
        """Transition a job to completed with its final result attached."""
        return self.patch_job(
            job_id,
            status="completed",
            progress=100,
            current_stage="completed",
            result=result,
            completed_at=completed_at,
            error_message=None,
        )

    def mark_failed(self, job_id: str, *, message: str, completed_at: str) -> AuroraJob:
        """Transition a job to failed, recording the error message."""
        return self.patch_job(
            job_id,
            status="failed",
            current_stage="failed",
            error_message=message,
            completed_at=completed_at,
        )

    def request_cancel(self, job_id: str) -> AuroraJob:
        """Request cancellation of a job.

        Terminal jobs are returned unchanged; queued jobs are cancelled
        immediately; processing jobs are only flagged so the worker can stop
        at its next cancel check.

        Raises:
            KeyError: when *job_id* is unknown.
        """
        job = self.get_job(job_id)
        if not job:
            raise KeyError(job_id)
        if job.status in ("completed", "failed", "cancelled"):
            return job
        if job.status == "queued":
            return self.patch_job(
                job_id,
                status="cancelled",
                current_stage="cancelled",
                cancel_requested=True,
                progress=0,
            )
        return self.patch_job(
            job_id,
            cancel_requested=True,
            current_stage="cancelling",
        )

    def delete_job(self, job_id: str, *, remove_artifacts: bool = True) -> bool:
        """Remove a job record (and optionally its upload/output dirs).

        Returns True if the job existed. Artifact removal happens outside
        the lock and is best-effort (ignore_errors).
        """
        with self._lock:
            current = self._jobs.pop(job_id, None)
            if not current:
                return False
            self._job_path(job_id).unlink(missing_ok=True)

        if remove_artifacts:
            shutil.rmtree(self.uploads_dir / job_id, ignore_errors=True)
            shutil.rmtree(self.outputs_dir / job_id, ignore_errors=True)
        return True

    def mark_cancelled(self, job_id: str, *, completed_at: str, message: str = "Cancelled by user") -> AuroraJob:
        """Finalize a job as cancelled with a completion timestamp and message."""
        return self.patch_job(
            job_id,
            status="cancelled",
            current_stage="cancelled",
            cancel_requested=True,
            error_message=message,
            completed_at=completed_at,
        )

    def count_by_status(self) -> Dict[JobStatus, int]:
        """Count jobs per status.

        NOTE(review): assumes every job.status is one of the five known
        values; an unexpected status would raise KeyError here.
        """
        counts: Dict[JobStatus, int] = {
            "queued": 0,
            "processing": 0,
            "completed": 0,
            "failed": 0,
            "cancelled": 0,
        }
        with self._lock:
            for job in self._jobs.values():
                counts[job.status] += 1
        return counts

    def recover_interrupted_jobs(
        self,
        *,
        completed_at: str,
        message: str,
        strategy: str = "failed",
    ) -> int:
        """Recover queued/processing jobs after a service restart.

        Records a recovery audit trail in each job's metadata
        (recovery_count / last_recovery_at / last_recovery_reason).

        strategy:
            - "failed": mark interrupted jobs as failed
            - "requeue": move them back to the queue for auto-retry on startup

        Returns the number of jobs touched.
        """
        mode = (strategy or "failed").strip().lower()
        recovered = 0
        with self._lock:
            for job_id, current in list(self._jobs.items()):
                if current.status not in ("queued", "processing"):
                    continue
                payload = _model_dump(current)
                meta = payload.get("metadata") or {}
                if not isinstance(meta, dict):
                    meta = {}
                meta["recovery_count"] = int(meta.get("recovery_count", 0)) + 1
                meta["last_recovery_at"] = completed_at
                meta["last_recovery_reason"] = message
                payload["metadata"] = meta

                if mode == "requeue":
                    payload.update(
                        {
                            "status": "queued",
                            "current_stage": "queued (recovered after restart)",
                            "error_message": None,
                            "started_at": None,
                            "completed_at": None,
                            "cancel_requested": False,
                            "progress": 0,
                        }
                    )
                else:
                    payload.update(
                        {
                            "status": "failed",
                            "current_stage": "failed",
                            "error_message": message,
                            "completed_at": completed_at,
                            # Keep progress >= 1 so the UI shows the job had started.
                            "progress": max(1, int(payload.get("progress", 0))),
                        }
                    )
                payload["job_id"] = job_id
                updated = AuroraJob(**payload)
                self._jobs[job_id] = updated
                self._save_job(updated)
                recovered += 1
        return recovered
|
||||
96
services/aurora-service/app/langchain_scaffold.py
Normal file
96
services/aurora-service/app/langchain_scaffold.py
Normal file
@@ -0,0 +1,96 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Callable, Dict, List
|
||||
|
||||
|
||||
@dataclass
class ToolSpec:
    """Declarative description of one tool a subagent can invoke."""

    # Tool identifier as exposed to the agent framework.
    name: str
    # Human-readable summary of what the tool does.
    description: str
    # Callable executed when the tool runs; returns a result dict.
    handler: Callable[..., dict]
|
||||
|
||||
|
||||
@dataclass
class SubagentSpec:
    """Declarative description of one AURORA subagent and its toolbox."""

    # Display name of the subagent.
    name: str
    # One-line role description (what the agent is responsible for).
    role: str
    # Tools available to this subagent; empty by default.
    tools: List[ToolSpec] = field(default_factory=list)
|
||||
|
||||
|
||||
def _todo_handler(**kwargs) -> dict:
|
||||
return {
|
||||
"status": "todo",
|
||||
"message": "Replace scaffold handler with real model/tool integration",
|
||||
"input": kwargs,
|
||||
}
|
||||
|
||||
|
||||
def build_subagent_registry() -> Dict[str, SubagentSpec]:
    """
    LangChain-ready registry for AURORA internal subagents.

    Handlers deliberately stay stubbed (_todo_handler) so deployments remain
    safe until concrete model adapters are wired.
    """
    # (registry key, display name, role, [(tool name, tool description), ...])
    table = [
        (
            "clarity",
            "Clarity",
            "Video Enhancement Agent",
            [
                ("denoise_video", "Denoise video frames (FastDVDnet)"),
                ("upscale_video", "Super-resolution (Real-ESRGAN)"),
                ("interpolate_frames", "Frame interpolation (RIFE)"),
                ("stabilize_video", "Video stabilization"),
            ],
        ),
        (
            "vera",
            "Vera",
            "Face Restoration Agent",
            [
                ("detect_faces", "Face detection and quality checks"),
                ("enhance_face", "Restore faces with GFPGAN"),
                ("enhance_face_codeformer", "Alternative face restoration"),
            ],
        ),
        (
            "echo",
            "Echo",
            "Audio Forensics Agent",
            [
                ("extract_audio_from_video", "Extract audio track"),
                ("denoise_audio", "Audio denoise pipeline"),
                ("enhance_speech", "Improve speech intelligibility"),
                ("detect_deepfake_audio", "Deepfake audio heuristics"),
            ],
        ),
        (
            "pixis",
            "Pixis",
            "Photo Analysis Agent",
            [
                ("denoise_photo", "Photo denoise"),
                ("upscale_photo", "Photo super-resolution"),
                ("restore_old_photo", "Legacy photo restoration"),
                ("analyze_exif", "EXIF integrity analysis"),
            ],
        ),
        (
            "plate_detect",
            "PlateDetect",
            "License Plate Detection & OCR Agent",
            [
                ("detect_plates", "YOLO-v9 license plate detection"),
                ("ocr_plates", "OCR plate text (fast-plate-ocr)"),
                ("enhance_plate_roi", "Real-ESRGAN plate region upscale"),
                ("export_plate_report", "Export plate detections JSON"),
            ],
        ),
        (
            "kore",
            "Kore",
            "Forensic Verifier Agent",
            [
                ("generate_chain_of_custody", "Generate forensic custody JSON"),
                ("sign_document", "Apply cryptographic signature"),
                ("integrate_with_amped", "Amped FIVE integration point"),
                ("integrate_with_cognitech", "Cognitech integration point"),
            ],
        ),
    ]
    return {
        key: SubagentSpec(
            name=display_name,
            role=role,
            tools=[ToolSpec(tool_name, tool_desc, _todo_handler) for tool_name, tool_desc in tools],
        )
        for key, display_name, role, tools in table
    }
|
||||
198
services/aurora-service/app/orchestrator.py
Normal file
198
services/aurora-service/app/orchestrator.py
Normal file
@@ -0,0 +1,198 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Callable, List, Optional
|
||||
from urllib.parse import quote
|
||||
|
||||
from .schemas import (
|
||||
AuroraJob,
|
||||
AuroraResult,
|
||||
InputFileDescriptor,
|
||||
MediaType,
|
||||
OutputFileDescriptor,
|
||||
ProcessingStep,
|
||||
)
|
||||
from .subagents import (
|
||||
ClarityAgent,
|
||||
EchoAgent,
|
||||
KoreAgent,
|
||||
PipelineCancelledError,
|
||||
PixisAgent,
|
||||
PlateAgent,
|
||||
SubagentContext,
|
||||
VeraAgent,
|
||||
sha256_file,
|
||||
)
|
||||
|
||||
ProgressCallback = Callable[[int, str, Optional[ProcessingStep]], None]
|
||||
CancelCheck = Callable[[], bool]
|
||||
|
||||
|
||||
class JobCancelledError(RuntimeError):
    """Raised when a cancel request interrupts a running pipeline job."""
    pass
|
||||
|
||||
|
||||
class AuroraOrchestrator:
|
||||
def __init__(self, outputs_root: Path, public_base_url: str) -> None:
|
||||
self.outputs_root = outputs_root
|
||||
self.public_base_url = public_base_url.rstrip("/")
|
||||
|
||||
def _build_pipeline(self, media_type: MediaType, forensic: bool, priority: str = "balanced") -> List[object]:
|
||||
if media_type == "video":
|
||||
pipeline: List[object] = [VeraAgent(), PlateAgent()]
|
||||
elif media_type == "audio":
|
||||
pipeline = [EchoAgent()]
|
||||
elif media_type == "photo":
|
||||
pipeline = [VeraAgent(), PixisAgent(), PlateAgent()]
|
||||
else:
|
||||
pipeline = [ClarityAgent()]
|
||||
|
||||
if forensic:
|
||||
pipeline.append(KoreAgent())
|
||||
return pipeline
|
||||
|
||||
def _file_url(self, job_id: str, name: str) -> str:
|
||||
return f"{self.public_base_url}/api/aurora/files/{quote(job_id)}/{quote(name)}"
|
||||
|
||||
def _artifact_type(self, path: Path, media_type: MediaType) -> str:
|
||||
lowered = path.name.lower()
|
||||
if lowered.endswith("forensic_log.json"):
|
||||
return "forensic_log"
|
||||
if lowered.endswith("forensic_signature.json"):
|
||||
return "forensic_signature"
|
||||
if "transcript" in lowered:
|
||||
return "transcript"
|
||||
if "plate_detection" in lowered:
|
||||
return "plate_detections"
|
||||
return media_type
|
||||
|
||||
    def run(
        self,
        job: AuroraJob,
        progress_callback: Optional[ProgressCallback] = None,
        cancel_check: Optional[CancelCheck] = None,
    ) -> AuroraResult:
        """Execute the subagent pipeline for *job* and assemble the final result.

        Builds a pipeline from the job's media type / mode / priority, runs each
        subagent in order (threading the current media file through), collects
        processing steps and extra artifacts, moves the final media into the
        job's output directory, and returns an AuroraResult describing all
        outputs.

        Args:
            job: The job record to process; ``job.metadata`` may carry
                ``priority`` and ``export_options``.
            progress_callback: Optional ``(progress_pct, stage, step)`` sink;
                called during and after each stage, and once with (100,
                "completed", None) at the end.
            cancel_check: Optional zero-arg callable polled between stages;
                a truthy return aborts the run.

        Raises:
            JobCancelledError: If ``cancel_check`` fires, or a subagent raises
                PipelineCancelledError.
        """
        forensic_mode = job.mode == "forensic"
        # Priority is needed before the full metadata read below because it
        # influences which pipeline gets built.
        meta_early = job.metadata if isinstance(job.metadata, dict) else {}
        priority_early = str(meta_early.get("priority") or "balanced").strip().lower() or "balanced"
        pipeline = self._build_pipeline(job.media_type, forensic_mode, priority_early)

        output_dir = self.outputs_root / job.job_id
        output_dir.mkdir(parents=True, exist_ok=True)
        # NOTE(review): meta/priority duplicate meta_early/priority_early above;
        # kept as-is to preserve behavior.
        meta = job.metadata if isinstance(job.metadata, dict) else {}
        export_options = meta.get("export_options") if isinstance(meta.get("export_options"), dict) else {}
        priority = str(meta.get("priority") or "balanced").strip().lower() or "balanced"

        # Base context shared by all stages; per-stage copies add a progress hook.
        ctx = SubagentContext(
            job_id=job.job_id,
            mode=job.mode,
            media_type=job.media_type,
            input_hash=job.input_hash,
            output_dir=output_dir,
            priority=priority,
            export_options=export_options,
            cancel_check=cancel_check,
        )

        current_path = Path(job.input_path)  # media file handed to the next stage
        processing_log: List[ProcessingStep] = []
        extra_artifacts: List[Path] = []
        digital_signature: Optional[str] = None

        # Stages map onto 0..95% of the progress bar; the last 5% is reserved
        # for finalization below.
        total = max(1, len(pipeline))
        for idx, subagent in enumerate(pipeline, start=1):
            if cancel_check and cancel_check():
                raise JobCancelledError(f"Job {job.job_id} cancelled")
            stage_from = int(((idx - 1) / total) * 95)
            stage_to = int((idx / total) * 95)

            # Redefined each iteration and only invoked synchronously inside
            # this iteration, so the late-bound stage_from/stage_to are safe.
            def _stage_progress(fraction: float, stage_label: str) -> None:
                if not progress_callback:
                    return
                bounded = max(0.0, min(1.0, float(fraction)))
                progress = stage_from + int((stage_to - stage_from) * bounded)
                progress_callback(progress, stage_label, None)

            stage_ctx = SubagentContext(
                job_id=ctx.job_id,
                mode=ctx.mode,
                media_type=ctx.media_type,
                input_hash=ctx.input_hash,
                output_dir=ctx.output_dir,
                priority=ctx.priority,
                export_options=ctx.export_options,
                cancel_check=ctx.cancel_check,
                stage_progress=_stage_progress if progress_callback else None,
            )

            try:
                run_result = subagent.run(stage_ctx, current_path)
            except PipelineCancelledError as exc:
                # Normalize stage-level cancellation into the job-level error.
                raise JobCancelledError(str(exc)) from exc
            current_path = run_result.output_path
            processing_log.extend(run_result.steps)
            extra_artifacts.extend(run_result.artifacts)
            # A later stage's signature overrides an earlier one.
            if run_result.metadata.get("digital_signature"):
                digital_signature = run_result.metadata["digital_signature"]

            stage = run_result.steps[-1].step if run_result.steps else f"stage_{idx}"
            progress = int((idx / total) * 95)
            if progress_callback:
                # Replay each recorded step at this stage's final percentage.
                for step in run_result.steps:
                    progress_callback(progress, stage, step)

        if cancel_check and cancel_check():
            raise JobCancelledError(f"Job {job.job_id} cancelled")

        # Move/rename the last stage's output to the canonical result name,
        # keeping the original extension (".bin" when there is none).
        final_media = output_dir / f"aurora_result{current_path.suffix or '.bin'}"
        if current_path != final_media:
            if current_path.parent == output_dir:
                current_path.rename(final_media)
            else:
                shutil.move(str(current_path), str(final_media))
        result_hash = sha256_file(final_media)

        output_files: List[OutputFileDescriptor] = [
            OutputFileDescriptor(
                type=job.media_type,
                name=final_media.name,
                url=self._file_url(job.job_id, final_media.name),
                hash=result_hash,
            )
        ]

        for artifact in extra_artifacts:
            output_files.append(
                OutputFileDescriptor(
                    type=self._artifact_type(artifact, job.media_type),
                    name=artifact.name,
                    url=self._file_url(job.job_id, artifact.name),
                    hash=sha256_file(artifact),
                )
            )

        # Forensic jobs always carry a signature: synthesize one from the
        # result hash when no stage produced a real one.
        if forensic_mode and not digital_signature:
            digest = result_hash.split(":", 1)[-1][:48]
            digital_signature = f"ed25519:{digest}"

        if progress_callback:
            progress_callback(100, "completed", None)

        return AuroraResult(
            mode=job.mode,
            job_id=job.job_id,
            media_type=job.media_type,
            input_file=InputFileDescriptor(
                name=job.file_name,
                hash=job.input_hash,
            ),
            processing_log=processing_log,
            output_files=output_files,
            digital_signature=digital_signature,
            metadata={
                "pipeline": [type(agent).__name__ for agent in pipeline],
                "forensic_mode": forensic_mode,
                "export_options": export_options,
            },
        )
|
||||
92
services/aurora-service/app/reporting.py
Normal file
92
services/aurora-service/app/reporting.py
Normal file
@@ -0,0 +1,92 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Iterable
|
||||
|
||||
from fpdf import FPDF # type: ignore[import-untyped]
|
||||
|
||||
from .schemas import AuroraJob
|
||||
|
||||
|
||||
def _line(pdf: FPDF, text: str) -> None:
    """Emit *text* as a 10pt body line spanning the printable page width."""
    pdf.set_font("Helvetica", size=10)
    pdf.set_x(pdf.l_margin)
    usable = pdf.w - pdf.l_margin - pdf.r_margin
    pdf.multi_cell(usable, 5, txt=_soft_wrap_tokens(text))
|
||||
|
||||
|
||||
def _section(pdf: FPDF, title: str) -> None:
    """Emit *title* as a bold 12pt section heading on its own line."""
    usable = pdf.w - pdf.l_margin - pdf.r_margin
    pdf.ln(2)
    pdf.set_x(pdf.l_margin)
    pdf.set_font("Helvetica", style="B", size=12)
    pdf.cell(usable, 7, txt=title, ln=1)
|
||||
|
||||
|
||||
def _soft_wrap_tokens(text: str, chunk: int = 40) -> str:
|
||||
parts = []
|
||||
for token in str(text).split(" "):
|
||||
if len(token) <= chunk:
|
||||
parts.append(token)
|
||||
continue
|
||||
segments = [token[i : i + chunk] for i in range(0, len(token), chunk)]
|
||||
parts.append(" ".join(segments))
|
||||
return " ".join(parts)
|
||||
|
||||
|
||||
def _iter_output_rows(job: AuroraJob) -> Iterable[str]:
|
||||
if not job.result:
|
||||
return []
|
||||
for item in job.result.output_files:
|
||||
yield f"[{item.type}] {item.name} | {item.hash}"
|
||||
|
||||
|
||||
def generate_forensic_report_pdf(job: AuroraJob, output_path: Path) -> Path:
    """Render an A4 PDF report summarizing a completed forensic job.

    Sections: case summary, processing log, output artifacts, metadata.

    Args:
        job: Job whose ``result`` must already be populated.
        output_path: Destination file; parent directories are created.

    Returns:
        ``output_path``, for call chaining.

    Raises:
        RuntimeError: If the job has no result attached.
    """
    if not job.result:
        raise RuntimeError("Job has no result data")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    pdf = FPDF(unit="mm", format="A4")
    # Auto page break keeps long processing logs readable across pages.
    pdf.set_auto_page_break(auto=True, margin=14)
    pdf.add_page()

    # Report title header.
    pdf.set_font("Helvetica", style="B", size=16)
    pdf.cell(0, 10, txt="Aurora Forensic Report", ln=1)
    pdf.set_font("Helvetica", size=9)
    pdf.cell(0, 5, txt="Autonomous Media Forensics Agent", ln=1)
    pdf.ln(3)

    _section(pdf, "Case Summary")
    _line(pdf, f"Job ID: {job.job_id}")
    _line(pdf, f"Mode: {job.mode}")
    _line(pdf, f"Media Type: {job.media_type}")
    _line(pdf, f"Status: {job.status}")
    _line(pdf, f"Created At: {job.created_at}")
    _line(pdf, f"Started At: {job.started_at or '-'}")
    _line(pdf, f"Completed At: {job.completed_at or '-'}")
    _line(pdf, f"Input File: {job.file_name}")
    _line(pdf, f"Input Hash: {job.input_hash}")
    _line(pdf, f"Digital Signature: {job.result.digital_signature or '-'}")

    _section(pdf, "Processing Log")
    if not job.result.processing_log:
        _line(pdf, "No processing steps were recorded.")
    for idx, step in enumerate(job.result.processing_log, start=1):
        _line(
            pdf,
            f"{idx}. {step.step} | agent={step.agent} | model={step.model} | time_ms={step.time_ms}",
        )

    _section(pdf, "Output Artifacts")
    rows = list(_iter_output_rows(job))
    if not rows:
        _line(pdf, "No output artifacts available.")
    for row in rows:
        _line(pdf, row)

    _section(pdf, "Metadata")
    # Metadata values are rendered with their default str() form.
    for k, v in (job.result.metadata or {}).items():
        _line(pdf, f"{k}: {v}")

    pdf.output(str(output_path))
    return output_path
|
||||
61
services/aurora-service/app/schemas.py
Normal file
61
services/aurora-service/app/schemas.py
Normal file
@@ -0,0 +1,61 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, List, Literal, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
# Closed vocabularies used throughout the Aurora schemas.
# "tactical" vs "forensic" selects the processing mode (forensic jobs carry a
# digital signature); media type and job status mirror the pipeline lifecycle.
AuroraMode = Literal["tactical", "forensic"]
MediaType = Literal["video", "audio", "photo", "unknown"]
JobStatus = Literal["queued", "processing", "completed", "failed", "cancelled"]
||||
|
||||
|
||||
class InputFileDescriptor(BaseModel):
    """Identity of the file a job was started from."""

    name: str  # original upload file name
    hash: str  # content hash (e.g. "sha256:..." — format set by the hasher)
|
||||
|
||||
|
||||
class ProcessingStep(BaseModel):
    """One executed pipeline stage, as recorded in a job's processing log."""

    step: str  # stage identifier reported by the subagent
    agent: str  # name of the subagent that ran the stage
    model: str  # model used by the stage
    time_ms: int = 0  # stage duration in milliseconds
    details: Dict[str, Any] = Field(default_factory=dict)  # stage-specific extras
|
||||
|
||||
|
||||
class OutputFileDescriptor(BaseModel):
    """One produced artifact: its kind, file name, download URL and hash."""

    type: str  # artifact kind (media type or artifact category like "transcript")
    name: str  # file name inside the job's output directory
    url: str  # URL where the artifact can be fetched
    hash: str  # content hash of the artifact
|
||||
|
||||
|
||||
class AuroraResult(BaseModel):
    """Final summary of a finished job: input, log, outputs and signature."""

    agent: str = "Aurora"  # producing agent name (constant default)
    mode: AuroraMode
    job_id: str
    media_type: MediaType
    input_file: InputFileDescriptor
    processing_log: List[ProcessingStep] = Field(default_factory=list)
    output_files: List[OutputFileDescriptor] = Field(default_factory=list)
    # Populated in forensic mode; "ed25519:<digest>" when synthesized.
    digital_signature: Optional[str] = None
    metadata: Dict[str, Any] = Field(default_factory=dict)  # pipeline info, export options, etc.
|
||||
|
||||
|
||||
class AuroraJob(BaseModel):
    """Mutable record tracking one media-processing request end to end."""

    job_id: str  # unique identifier; also used as the output directory name
    file_name: str  # original upload file name
    mode: AuroraMode
    media_type: MediaType
    input_path: str  # filesystem path of the stored input file
    input_hash: str  # content hash of the input file
    status: JobStatus = "queued"
    progress: int = 0  # 0-100 percentage
    current_stage: str = "queued"  # human-readable label of the active stage
    error_message: Optional[str] = None  # set when status becomes "failed"
    cancel_requested: bool = False  # polled by the pipeline between stages
    processing_log: List[ProcessingStep] = Field(default_factory=list)
    result: Optional[AuroraResult] = None  # populated on completion
    # Timestamps are stored as strings; format presumably ISO-8601 — confirm
    # against the code that sets them.
    created_at: str
    started_at: Optional[str] = None
    completed_at: Optional[str] = None
    # Free-form request options; "priority" and "export_options" are read by
    # the orchestrator.
    metadata: Dict[str, Any] = Field(default_factory=dict)
|
||||
1968
services/aurora-service/app/subagents.py
Normal file
1968
services/aurora-service/app/subagents.py
Normal file
File diff suppressed because it is too large
Load Diff
19
services/aurora-service/launchd/status-launchd.sh
Executable file
19
services/aurora-service/launchd/status-launchd.sh
Executable file
@@ -0,0 +1,19 @@
|
||||
#!/usr/bin/env bash
# Show launchd status for the Aurora agent and tail its recent logs.
# Read-only: every command is best-effort, so it is safe to run any time.
set -euo pipefail

# Service label and per-user launchd domain (gui/<uid>); both overridable
# via environment.
LABEL="${AURORA_LAUNCHD_LABEL:-com.daarion.aurora}"
DOMAIN="gui/$(id -u)"
# Log files are expected under the Aurora data directory.
DATA_DIR_VALUE="${AURORA_DATA_DIR:-${HOME}/.sofiia/aurora-data}"
LOG_OUT="${DATA_DIR_VALUE}/logs/launchd.out.log"
LOG_ERR="${DATA_DIR_VALUE}/logs/launchd.err.log"

echo "[aurora-launchd] domain: ${DOMAIN}"
echo "[aurora-launchd] label: ${LABEL}"
echo ""
# `|| true` keeps the script alive when the job is not loaded.
launchctl print "${DOMAIN}/${LABEL}" || true
echo ""
echo "[aurora-launchd] tail stdout (${LOG_OUT})"
tail -n 40 "${LOG_OUT}" 2>/dev/null || true
echo ""
echo "[aurora-launchd] tail stderr (${LOG_ERR})"
tail -n 80 "${LOG_ERR}" 2>/dev/null || true
|
||||
15
services/aurora-service/launchd/uninstall-launchd.sh
Executable file
15
services/aurora-service/launchd/uninstall-launchd.sh
Executable file
@@ -0,0 +1,15 @@
|
||||
#!/usr/bin/env bash
# Unload the Aurora launchd agent and delete its plist from LaunchAgents.
set -euo pipefail

# Label and per-user launchd domain (gui/<uid>); label overridable via env.
LABEL="${AURORA_LAUNCHD_LABEL:-com.daarion.aurora}"
DOMAIN="gui/$(id -u)"
PLIST_PATH="${HOME}/Library/LaunchAgents/${LABEL}.plist"

# Best-effort teardown: ignore errors when the job is not loaded/enabled.
launchctl bootout "${DOMAIN}/${LABEL}" >/dev/null 2>&1 || true
launchctl disable "${DOMAIN}/${LABEL}" >/dev/null 2>&1 || true

# Remove the plist if it exists (rm -f alone would also be safe, but the
# guard makes the intent explicit).
if [ -f "${PLIST_PATH}" ]; then
  rm -f "${PLIST_PATH}"
fi

echo "[aurora-launchd] removed: ${PLIST_PATH}"
|
||||
13
services/aurora-service/requirements.txt
Normal file
13
services/aurora-service/requirements.txt
Normal file
@@ -0,0 +1,13 @@
|
||||
fastapi==0.110.0
|
||||
uvicorn[standard]==0.29.0
|
||||
python-multipart==0.0.9
|
||||
pydantic==2.7.4
|
||||
langchain==0.3.19
|
||||
gfpgan==1.3.8
|
||||
realesrgan==0.3.0
|
||||
facexlib==0.3.0
|
||||
basicsr==1.4.2
|
||||
opencv-python-headless==4.10.0.84
|
||||
torch==2.5.1
|
||||
torchvision==0.20.1
|
||||
fpdf2==2.8.2
|
||||
30
services/aurora-service/setup-native-macos.sh
Executable file
30
services/aurora-service/setup-native-macos.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
# Create (or reuse) a local virtualenv for running Aurora natively on macOS
# and install its Python requirements. Requires python3.11 and ffmpeg.
set -euo pipefail

# Resolve the service root from this script's own location so the script
# works regardless of the caller's cwd.
ROOT_DIR="$(cd "$(dirname "$0")" && pwd)"
VENV_DIR="${ROOT_DIR}/.venv-macos"
PYTHON_BIN="${PYTHON_BIN:-python3.11}"  # overridable interpreter
cd "${ROOT_DIR}"

echo "[aurora-native] root: ${ROOT_DIR}"
echo "[aurora-native] python: ${PYTHON_BIN}"

# Fail fast on missing prerequisites.
if ! command -v "${PYTHON_BIN}" >/dev/null 2>&1; then
  echo "[aurora-native] error: ${PYTHON_BIN} not found"
  exit 1
fi

if ! command -v ffmpeg >/dev/null 2>&1; then
  echo "[aurora-native] error: ffmpeg is required (brew install ffmpeg)"
  exit 1
fi

# Create the venv only once; subsequent runs just refresh packages.
if [ ! -d "${VENV_DIR}" ]; then
  "${PYTHON_BIN}" -m venv "${VENV_DIR}"
fi

source "${VENV_DIR}/bin/activate"
python -m pip install --upgrade pip setuptools wheel
python -m pip install -r "${ROOT_DIR}/requirements.txt"

echo "[aurora-native] setup complete: ${VENV_DIR}"
|
||||
Reference in New Issue
Block a user