feat(production): sync all modified production files to git

Includes updates across gateway, router, node-worker, memory-service,
aurora-service, swapper, sofiia-console UI and node2 infrastructure:

- gateway-bot: Dockerfile, http_api.py, druid/aistalk prompts, doc_service
- services/router: main.py, router-config.yml, fabric_metrics, memory_retrieval,
  offload_client, prompt_builder
- services/node-worker: worker.py, main.py, config.py, fabric_metrics
- services/memory-service: Dockerfile, database.py, main.py, requirements
- services/aurora-service: main.py (+399), kling.py, quality_report.py
- services/swapper-service: main.py, swapper_config_node2.yaml
- services/sofiia-console: static/index.html (console UI update)
- config: agent_registry, crewai_agents/teams, router_agents
- ops/fabric_preflight.sh: updated preflight checks
- router-config.yml, docker-compose.node2.yml: infra updates
- docs: NODA1-AGENT-ARCHITECTURE, fabric_contract updated

Made-with: Cursor
This commit is contained in:
Apple
2026-03-03 07:13:29 -08:00
parent 9aac835882
commit e9dedffa48
35 changed files with 3317 additions and 805 deletions

View File

@@ -143,6 +143,7 @@ def kling_video_enhance(
def kling_video_generate(
*,
image_b64: Optional[str] = None,
image_url: Optional[str] = None,
image_id: Optional[str] = None,
prompt: str,
@@ -165,8 +166,8 @@ def kling_video_generate(
duration: '5' or '10'.
aspect_ratio: '16:9', '9:16', '1:1'.
"""
if not image_url and not image_id:
raise ValueError("Either image_url or image_id must be provided")
if not image_b64 and not image_url and not image_id:
raise ValueError("One of image_b64 / image_url / image_id must be provided")
payload: Dict[str, Any] = {
"model": model,
@@ -177,10 +178,14 @@ def kling_video_generate(
"negative_prompt": negative_prompt,
"aspect_ratio": aspect_ratio,
}
if image_url:
payload["image"] = {"type": "url", "url": image_url}
if image_id:
payload["image"] = {"type": "id", "id": image_id}
# Current Kling endpoint expects "image" as base64 payload string.
# Keep url/id compatibility as a best-effort fallback for older gateways.
if image_b64:
payload["image"] = image_b64
elif image_url:
payload["image"] = image_url
elif image_id:
payload["image"] = image_id
if callback_url:
payload["callback_url"] = callback_url
@@ -191,6 +196,37 @@ def kling_video_generate(
)
def kling_video_generate_from_file(
    *,
    image_path: Path,
    prompt: str,
    negative_prompt: str = "noise, blur, artifacts, distortion",
    model: str = "kling-v1-5",
    mode: str = "pro",
    duration: str = "5",
    cfg_scale: float = 0.5,
    aspect_ratio: str = "16:9",
    callback_url: Optional[str] = None,
) -> Dict[str, Any]:
    """Generate video from a local image file by sending base64 payload."""
    # Local import keeps the module's top-level dependency surface unchanged.
    import base64

    encoded_image = base64.b64encode(image_path.read_bytes()).decode()
    return kling_video_generate(
        image_b64=encoded_image,
        prompt=prompt,
        negative_prompt=negative_prompt,
        model=model,
        mode=mode,
        duration=duration,
        cfg_scale=cfg_scale,
        aspect_ratio=aspect_ratio,
        callback_url=callback_url,
    )
def kling_task_status(task_id: str) -> Dict[str, Any]:
"""Get status of any Kling task by ID."""
return _kling_request_with_fallback(
@@ -267,7 +303,12 @@ def kling_poll_until_done(
def kling_health_check() -> Dict[str, Any]:
"""Quick connectivity check — returns status dict."""
try:
resp = _kling_request("GET", "/v1/models", timeout=10)
return {"ok": True, "models": resp}
# `/v1/models` may be disabled in some accounts/regions.
# `/v1/videos/image2video` reliably returns code=0 when auth+endpoint are valid.
resp = _kling_request("GET", "/v1/videos/image2video", timeout=10)
code = resp.get("code") if isinstance(resp, dict) else None
if code not in (None, 0, "0"):
return {"ok": False, "error": f"Kling probe returned non-zero code: {code}", "probe": resp}
return {"ok": True, "probe_path": "/v1/videos/image2video", "probe": resp}
except Exception as exc:
return {"ok": False, "error": str(exc)}

View File

@@ -4,6 +4,7 @@ import asyncio
import hashlib
import json
import logging
import mimetypes
import os
import re
import shutil
@@ -13,9 +14,9 @@ from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
from fastapi import Body, FastAPI, File, Form, HTTPException, Query, UploadFile
from fastapi import Body, FastAPI, File, Form, HTTPException, Query, Request, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.responses import FileResponse, Response, StreamingResponse
from .analysis import (
analyze_photo,
@@ -47,6 +48,7 @@ MAX_CONCURRENT_JOBS = max(1, int(os.getenv("AURORA_MAX_CONCURRENT_JOBS", "1")))
store = JobStore(DATA_DIR)
orchestrator = AuroraOrchestrator(store.outputs_dir, PUBLIC_BASE_URL)
RUN_SLOT = asyncio.Semaphore(MAX_CONCURRENT_JOBS)
KLING_VIDEO2VIDEO_CAPABLE: Optional[bool] = None
app = FastAPI(
title="Aurora Media Forensics Service",
@@ -228,7 +230,18 @@ def _enqueue_job_from_path(
upload_dir = store.uploads_dir / job_id
upload_dir.mkdir(parents=True, exist_ok=True)
input_path = upload_dir / safe_filename(file_name)
shutil.copy2(source_path, input_path)
trim_info: Optional[Dict[str, float]] = None
if media_type == "video":
trim_info = _video_trim_window(export_options)
if trim_info:
_trim_video_input(
source_path,
input_path,
start_sec=float(trim_info.get("start_sec") or 0.0),
duration_sec=trim_info.get("duration_sec"),
)
else:
shutil.copy2(source_path, input_path)
input_hash = compute_sha256(input_path)
initial_metadata = _estimate_upload_metadata(
@@ -238,6 +251,8 @@ def _enqueue_job_from_path(
)
if export_options:
initial_metadata["export_options"] = export_options
if trim_info:
initial_metadata["clip"] = trim_info
initial_metadata["priority"] = priority
if metadata_patch:
initial_metadata.update(metadata_patch)
@@ -408,6 +423,110 @@ def _parse_export_options(raw_value: str) -> Dict[str, Any]:
return parsed
def _opt_float(opts: Dict[str, Any], key: str) -> Optional[float]:
raw = opts.get(key)
if raw is None or raw == "":
return None
try:
return float(raw)
except Exception:
raise HTTPException(status_code=422, detail=f"export_options.{key} must be a number")
def _video_trim_window(export_options: Dict[str, Any]) -> Optional[Dict[str, float]]:
    """Resolve a requested clip window (start/duration) from export options.

    Accepts both the `clip_*` keys and the shorter `start_sec`/`duration_sec`
    aliases; returns None when no trimming was requested at all.
    """
    opts = export_options if isinstance(export_options, dict) else {}
    # Preferred keys first, then the aliases (same lookup order as validation).
    start = _opt_float(opts, "clip_start_sec")
    duration = _opt_float(opts, "clip_duration_sec")
    if start is None:
        start = _opt_float(opts, "start_sec")
    if duration is None:
        duration = _opt_float(opts, "duration_sec")
    if start is None and duration is None:
        return None
    begin = float(start or 0.0)
    length = float(duration) if duration is not None else None
    if begin < 0:
        raise HTTPException(status_code=422, detail="clip_start_sec must be >= 0")
    if length is not None and length <= 0:
        raise HTTPException(status_code=422, detail="clip_duration_sec must be > 0")
    return {
        "start_sec": round(begin, 3),
        "duration_sec": round(length, 3) if length is not None else None,  # type: ignore[arg-type]
    }
def _trim_video_input(source_path: Path, target_path: Path, *, start_sec: float, duration_sec: Optional[float]) -> None:
    """Trim video to a focused segment for faster iteration.

    First attempt is stream copy (lossless, fast). If that fails for
    container/codec reasons, fallback to lightweight re-encode.
    """

    def _build_cmd(codec_args: List[str]) -> List[str]:
        # Shared ffmpeg invocation; only the codec section differs between
        # the stream-copy attempt and the re-encode fallback.
        cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-y"]
        if start_sec > 0:
            cmd += ["-ss", f"{start_sec:.3f}"]
        cmd += ["-i", str(source_path)]
        if duration_sec is not None:
            cmd += ["-t", f"{duration_sec:.3f}"]
        cmd += ["-map", "0:v:0", "-map", "0:a?"]
        cmd += codec_args
        cmd += ["-movflags", "+faststart", str(target_path)]
        return cmd

    def _succeeded(proc: "subprocess.CompletedProcess[str]") -> bool:
        # ffmpeg can exit 0 yet write nothing usable, so also check the output.
        return proc.returncode == 0 and target_path.exists() and target_path.stat().st_size > 0

    copy_proc = subprocess.run(
        _build_cmd(["-c", "copy"]), capture_output=True, text=True, check=False
    )
    if _succeeded(copy_proc):
        return

    encode_proc = subprocess.run(
        _build_cmd(
            [
                "-c:v",
                "libx264",
                "-preset",
                "veryfast",
                "-crf",
                "17",
                "-c:a",
                "aac",
                "-b:a",
                "192k",
            ]
        ),
        capture_output=True,
        text=True,
        check=False,
    )
    if not _succeeded(encode_proc):
        err = (encode_proc.stderr or copy_proc.stderr or "").strip()[:280]
        raise HTTPException(status_code=422, detail=f"video trim failed: {err or 'ffmpeg error'}")
def _status_timing(job: Any) -> Dict[str, Optional[int]]:
started = _parse_iso_utc(job.started_at)
if not started:
@@ -1134,14 +1253,156 @@ async def cleanup_storage(
@app.get("/api/aurora/files/{job_id}/{file_name}")
async def download_output_file(job_id: str, file_name: str) -> FileResponse:
async def download_output_file(job_id: str, file_name: str, request: Request):
base = (store.outputs_dir / job_id).resolve()
target = (base / file_name).resolve()
if not str(target).startswith(str(base)):
raise HTTPException(status_code=403, detail="invalid file path")
if not target.exists() or not target.is_file():
raise HTTPException(status_code=404, detail="file not found")
return FileResponse(path=target, filename=target.name)
total_size = target.stat().st_size
range_header = request.headers.get("range")
if not range_header:
return FileResponse(
path=target,
filename=target.name,
headers={"Accept-Ranges": "bytes"},
)
parsed = _parse_range_header(range_header, total_size)
if parsed is None:
return FileResponse(
path=target,
filename=target.name,
headers={"Accept-Ranges": "bytes"},
)
start, end = parsed
if start >= total_size:
return Response(
status_code=416,
headers={"Content-Range": f"bytes */{total_size}", "Accept-Ranges": "bytes"},
)
content_length = (end - start) + 1
media_type = mimetypes.guess_type(str(target))[0] or "application/octet-stream"
def _iter_range():
with target.open("rb") as fh:
fh.seek(start)
remaining = content_length
while remaining > 0:
chunk = fh.read(min(65536, remaining))
if not chunk:
break
remaining -= len(chunk)
yield chunk
return StreamingResponse(
_iter_range(),
status_code=206,
media_type=media_type,
headers={
"Content-Range": f"bytes {start}-{end}/{total_size}",
"Content-Length": str(content_length),
"Accept-Ranges": "bytes",
"Content-Disposition": f'attachment; filename="{target.name}"',
},
)
def _parse_range_header(range_header: str, total_size: int) -> Optional[tuple[int, int]]:
value = str(range_header or "").strip()
if not value.lower().startswith("bytes="):
return None
spec = value.split("=", 1)[1].strip()
if "," in spec:
return None
if "-" not in spec:
return None
start_txt, end_txt = spec.split("-", 1)
try:
if start_txt == "":
# Suffix range: bytes=-N
suffix_len = int(end_txt)
if suffix_len <= 0:
return None
if suffix_len >= total_size:
return 0, max(0, total_size - 1)
return total_size - suffix_len, total_size - 1
start = int(start_txt)
if start < 0:
return None
if end_txt == "":
end = total_size - 1
else:
end = int(end_txt)
if end < start:
return None
return start, min(end, max(0, total_size - 1))
except Exception:
return None
def _extract_first_video_frame(video_path: Path, output_path: Path) -> Path:
    """Extract the first decodable video frame to an image file."""
    # OpenCV is an optional dependency; import lazily so the module loads
    # even when the fallback path is never used.
    try:
        import cv2  # type: ignore[import-untyped]
    except Exception as exc:
        raise RuntimeError("OpenCV is required for Kling image2video fallback.") from exc

    output_path.parent.mkdir(parents=True, exist_ok=True)
    capture = cv2.VideoCapture(str(video_path))
    try:
        if not capture.isOpened():
            raise RuntimeError(f"Cannot open video for fallback frame extraction: {video_path}")
        got_frame, frame = capture.read()
        if not got_frame or frame is None:
            raise RuntimeError("Could not read first frame from video")
        if not cv2.imwrite(str(output_path), frame):
            raise RuntimeError(f"Failed to write fallback frame: {output_path}")
    finally:
        # Always release the capture handle, even on failure.
        capture.release()
    return output_path
def _resolve_kling_result_url(task_data: Dict[str, Any]) -> Optional[str]:
if not isinstance(task_data, dict):
return None
task_result = task_data.get("task_result")
if isinstance(task_result, dict):
videos = task_result.get("videos")
if isinstance(videos, list):
for item in videos:
if not isinstance(item, dict):
continue
for key in ("url", "video_url", "play_url", "download_url"):
value = item.get(key)
if isinstance(value, str) and value:
return value
elif isinstance(videos, dict):
for key in ("url", "video_url", "play_url", "download_url"):
value = videos.get(key)
if isinstance(value, str) and value:
return value
for key in ("url", "video_url", "play_url", "download_url", "result_url"):
value = task_result.get(key)
if isinstance(value, str) and value:
return value
for key in ("kling_result_url", "result_url", "video_url", "url"):
value = task_data.get(key)
if isinstance(value, str) and value:
return value
return None
def _compact_error_text(err: Any, limit: int = 220) -> str:
text = re.sub(r"\s+", " ", str(err)).strip()
return text[:limit]
# ── Kling AI endpoints ────────────────────────────────────────────────────────
@@ -1163,7 +1424,7 @@ async def kling_enhance_video(
cfg_scale: float = Form(0.5, description="Prompt adherence 0.0-1.0"),
) -> Dict[str, Any]:
"""Submit Aurora job result to Kling AI for video-to-video enhancement."""
from .kling import kling_video_enhance, kling_upload_file
from .kling import kling_video_enhance, kling_upload_file, kling_video_generate_from_file
job = store.get_job(job_id)
if not job:
@@ -1181,45 +1442,97 @@ async def kling_enhance_video(
if not result_path.exists():
raise HTTPException(status_code=404, detail="Result file not found for this job")
try:
upload_resp = kling_upload_file(result_path)
except Exception as exc:
raise HTTPException(status_code=502, detail=f"Kling upload error: {str(exc)[:400]}") from exc
file_id = (upload_resp.get("data") or {}).get("resource_id") or (upload_resp.get("data") or {}).get("file_id")
global KLING_VIDEO2VIDEO_CAPABLE
if not file_id:
raise HTTPException(status_code=502, detail=f"Kling upload failed: {upload_resp}")
task_resp: Optional[Dict[str, Any]] = None
file_id: Optional[str] = None
kling_endpoint = "video2video"
video2video_error: Optional[str] = None
fallback_frame_name: Optional[str] = None
# Primary path: upload + video2video.
if KLING_VIDEO2VIDEO_CAPABLE is not False:
try:
upload_resp = kling_upload_file(result_path)
file_id = (upload_resp.get("data") or {}).get("resource_id") or (upload_resp.get("data") or {}).get("file_id")
if not file_id:
raise RuntimeError(f"Kling upload failed: {upload_resp}")
task_resp = kling_video_enhance(
video_id=file_id,
prompt=prompt,
negative_prompt=negative_prompt,
mode=mode,
duration=duration,
cfg_scale=cfg_scale,
)
KLING_VIDEO2VIDEO_CAPABLE = True
except Exception as exc:
raw_error = str(exc)
video2video_error = _compact_error_text(raw_error, limit=220)
logger.warning("kling video2video unavailable for %s: %s", job_id, video2video_error)
lower_error = raw_error.lower()
if "endpoint mismatch" in lower_error or "404" in lower_error:
KLING_VIDEO2VIDEO_CAPABLE = False
else:
video2video_error = "video2video skipped (previous endpoint mismatch)"
# Fallback path: extract first frame and run image2video (base64 payload).
if task_resp is None:
try:
frame_path = _extract_first_video_frame(
result_path,
store.outputs_dir / job_id / "_kling_fallback_frame.jpg",
)
fallback_frame_name = frame_path.name
task_resp = kling_video_generate_from_file(
image_path=frame_path,
prompt=prompt,
negative_prompt=negative_prompt,
mode=mode,
duration=duration,
cfg_scale=cfg_scale,
aspect_ratio="16:9",
)
kling_endpoint = "image2video"
except Exception as fallback_exc:
detail = "Kling submit failed"
if video2video_error:
detail = f"Kling video2video error: {video2video_error}; image2video fallback error: {_compact_error_text(fallback_exc, limit=220)}"
else:
detail = f"Kling image2video fallback error: {_compact_error_text(fallback_exc, limit=220)}"
raise HTTPException(status_code=502, detail=detail) from fallback_exc
if task_resp is None:
raise HTTPException(status_code=502, detail="Kling task submit failed: empty response")
try:
task_resp = kling_video_enhance(
video_id=file_id,
prompt=prompt,
negative_prompt=negative_prompt,
mode=mode,
duration=duration,
cfg_scale=cfg_scale,
)
except Exception as exc:
raise HTTPException(status_code=502, detail=f"Kling task submit error: {str(exc)[:400]}") from exc
task_id = (task_resp.get("data") or {}).get("task_id") or task_resp.get("task_id")
if not task_id:
raise HTTPException(status_code=502, detail=f"Kling task_id missing in response: {task_resp}")
kling_meta_dir = store.outputs_dir / job_id
kling_meta_path = kling_meta_dir / "kling_task.json"
kling_meta_path.write_text(json.dumps({
meta_payload: Dict[str, Any] = {
"aurora_job_id": job_id,
"kling_task_id": task_id,
"kling_file_id": file_id,
"kling_endpoint": kling_endpoint,
"prompt": prompt,
"mode": mode,
"duration": duration,
"submitted_at": datetime.now(timezone.utc).isoformat(),
"status": "submitted",
}, ensure_ascii=False, indent=2), encoding="utf-8")
}
if fallback_frame_name:
meta_payload["kling_source_frame"] = fallback_frame_name
if video2video_error:
meta_payload["video2video_error"] = video2video_error
kling_meta_path.write_text(json.dumps(meta_payload, ensure_ascii=False, indent=2), encoding="utf-8")
return {
"aurora_job_id": job_id,
"kling_task_id": task_id,
"kling_file_id": file_id,
"kling_endpoint": kling_endpoint,
"status": "submitted",
"status_url": f"/api/aurora/kling/status/{job_id}",
}
@@ -1238,9 +1551,10 @@ async def kling_task_status_for_job(job_id: str) -> Dict[str, Any]:
task_id = meta.get("kling_task_id")
if not task_id:
raise HTTPException(status_code=404, detail="Kling task_id missing in metadata")
endpoint = str(meta.get("kling_endpoint") or "video2video")
try:
status_resp = kling_video_task_status(task_id, endpoint="video2video")
status_resp = kling_video_task_status(task_id, endpoint=endpoint)
except Exception as exc:
raise HTTPException(status_code=502, detail=f"Kling status error: {str(exc)[:400]}") from exc
task_data = status_resp.get("data") or status_resp
@@ -1249,19 +1563,17 @@ async def kling_task_status_for_job(job_id: str) -> Dict[str, Any]:
meta["status"] = state
meta["last_checked"] = datetime.now(timezone.utc).isoformat()
result_url = None
works = task_data.get("task_result", {}).get("videos") or []
if works:
result_url = works[0].get("url")
if result_url:
meta["kling_result_url"] = result_url
meta["completed_at"] = datetime.now(timezone.utc).isoformat()
result_url = _resolve_kling_result_url(task_data)
if result_url:
meta["kling_result_url"] = result_url
meta["completed_at"] = datetime.now(timezone.utc).isoformat()
kling_meta_path.write_text(json.dumps(meta, ensure_ascii=False, indent=2), encoding="utf-8")
return {
"aurora_job_id": job_id,
"kling_task_id": task_id,
"kling_endpoint": endpoint,
"status": state,
"kling_result_url": result_url,
"meta": meta,
@@ -1279,7 +1591,7 @@ async def kling_image_to_video(
aspect_ratio: str = Form("16:9"),
) -> Dict[str, Any]:
"""Generate video from a still image using Kling AI."""
from .kling import kling_upload_file, kling_video_generate
from .kling import kling_video_generate_from_file
file_name = file.filename or "frame.jpg"
content = await file.read()
@@ -1293,16 +1605,8 @@ async def kling_image_to_video(
try:
try:
upload_resp = kling_upload_file(tmp_path)
except Exception as exc:
raise HTTPException(status_code=502, detail=f"Kling upload error: {str(exc)[:400]}") from exc
file_id = (upload_resp.get("data") or {}).get("resource_id") or (upload_resp.get("data") or {}).get("file_id")
if not file_id:
raise HTTPException(status_code=502, detail=f"Kling upload failed: {upload_resp}")
try:
task_resp = kling_video_generate(
image_id=file_id,
task_resp = kling_video_generate_from_file(
image_path=tmp_path,
prompt=prompt,
negative_prompt=negative_prompt,
model=model,
@@ -1313,9 +1617,12 @@ async def kling_image_to_video(
except Exception as exc:
raise HTTPException(status_code=502, detail=f"Kling task submit error: {str(exc)[:400]}") from exc
task_id = (task_resp.get("data") or {}).get("task_id") or task_resp.get("task_id")
if not task_id:
raise HTTPException(status_code=502, detail=f"Kling task_id missing in response: {task_resp}")
return {
"kling_task_id": task_id,
"kling_file_id": file_id,
"kling_file_id": None,
"kling_endpoint": "image2video",
"status": "submitted",
"status_url": f"/api/aurora/kling/task/{task_id}?endpoint=image2video",
}

View File

@@ -49,6 +49,78 @@ def _models_used(job: AuroraJob) -> List[str]:
return models
def _processing_steps(job: AuroraJob) -> List[Any]:
    """Return the job's processing log entries, preferring the result-level copy."""
    result = job.result
    if result and result.processing_log:
        return list(result.processing_log)
    # Fall back to the job-level log; empty list when neither is populated.
    return list(job.processing_log) if job.processing_log else []
def _result_media_hash(job: AuroraJob) -> Optional[str]:
    """Return the hash of the first output file matching the job's media type.

    Output entries typed as the job's own media type or any generic media
    kind are acceptable; entries with an empty hash are skipped.
    """
    if not job.result:
        return None
    wanted = str(job.media_type).strip().lower()
    acceptable = {wanted, "video", "photo", "image", "audio", "unknown"}
    for output in job.result.output_files:
        kind = str(getattr(output, "type", "") or "").strip().lower()
        if kind not in acceptable:
            continue
        digest = str(getattr(output, "hash", "") or "").strip()
        if digest:
            return digest
    return None
def _fallback_flags(job: AuroraJob) -> Dict[str, Any]:
    """Scan the processing log for fallback markers and summarize them.

    Distinguishes "hard" fallbacks (a whole step fell back) from "soft" SR
    frame-level fallbacks, collecting de-duplicated step names and
    human-readable warnings for both.
    """
    hard_used = False
    soft_used = False
    touched_steps: List[str] = []
    notes: List[str] = []
    for step in _processing_steps(job):
        name = str(getattr(step, "step", "") or "").strip() or "unknown"
        details = getattr(step, "details", {}) or {}
        if not isinstance(details, dict):
            continue
        # Hard fallback: the step as a whole reported falling back.
        if bool(details.get("fallback_used")):
            hard_used = True
            touched_steps.append(name)
            reason = str(details.get("reason") or "").strip()
            notes.append(
                f"{name}: hard fallback used ({reason})" if reason else f"{name}: hard fallback used"
            )
        # Soft fallback: super-resolution degraded on some frames.
        try:
            frames = int(details.get("sr_fallback_frames") or 0)
        except Exception:
            frames = 0
        if bool(details.get("sr_fallback_used")):
            # Flag set without a count still means at least one frame.
            frames = max(frames, 1)
        if frames > 0:
            soft_used = True
            touched_steps.append(name)
            message = f"{name}: SR soft fallback on {frames} frame(s)"
            method = str(details.get("sr_fallback_method") or "").strip()
            if method:
                message += f" via {method}"
            reason = str(details.get("sr_fallback_reason") or "").strip()
            if reason:
                message += f" ({reason})"
            notes.append(message)
    # dict.fromkeys preserves first-seen order while removing duplicates.
    return {
        "fallback_used": bool(hard_used or soft_used),
        "hard_fallback_used": hard_used,
        "soft_sr_fallback_used": soft_used,
        "fallback_steps": list(dict.fromkeys(touched_steps)),
        "warnings": list(dict.fromkeys(notes)),
    }
def _detect_faces_with_proxy_confidence(frame_bgr: Any) -> List[Dict[str, Any]]:
if cv2 is None:
return []
@@ -246,9 +318,29 @@ def build_quality_report(job: AuroraJob, outputs_dir: Path, *, refresh: bool = F
raise RuntimeError("Cannot build quality report: source/result file not found")
media_type: MediaType = job.media_type
processing_flags = _fallback_flags(job)
faces = _face_metrics(source_path, result_path, media_type)
plates = _plate_metrics(job_dir)
overall = _overall_metrics(source_path, result_path, media_type, job)
result_hash = _result_media_hash(job)
identical_to_input = bool(result_hash and result_hash == str(job.input_hash))
warnings = list(processing_flags.get("warnings") or [])
if identical_to_input:
warnings.append("output hash matches input hash; enhancement may be skipped.")
warnings = list(dict.fromkeys(warnings))
processing_status = "ok"
if bool(processing_flags.get("fallback_used")) or identical_to_input:
processing_status = "degraded"
overall["processing_status"] = processing_status
overall["fallback_used"] = bool(processing_flags.get("fallback_used"))
overall["hard_fallback_used"] = bool(processing_flags.get("hard_fallback_used"))
overall["soft_sr_fallback_used"] = bool(processing_flags.get("soft_sr_fallback_used"))
overall["identical_to_input"] = identical_to_input
if result_hash:
overall["result_hash"] = result_hash
if warnings:
overall["warnings"] = warnings
report = {
"job_id": job.job_id,
@@ -257,7 +349,13 @@ def build_quality_report(job: AuroraJob, outputs_dir: Path, *, refresh: bool = F
"faces": faces,
"plates": plates,
"overall": overall,
"processing_flags": {
**processing_flags,
"identical_to_input": identical_to_input,
"warnings": warnings,
},
"summary": {
"processing_status": processing_status,
"faces_detected_ratio": f"{faces['detected']} / {faces['source_detected'] or faces['detected']}",
"plates_recognized_ratio": f"{plates['recognized']} / {plates['detected']}",
},