New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
93 lines
2.9 KiB
Python
from __future__ import annotations
|
|
|
|
from pathlib import Path
|
|
from typing import Iterable
|
|
|
|
from fpdf import FPDF # type: ignore[import-untyped]
|
|
|
|
from .schemas import AuroraJob
|
|
|
|
|
|
def _line(pdf: FPDF, text: str) -> None:
    """Render one body line of text, wrapped across the printable width.

    The text is pre-chunked via ``_soft_wrap_tokens`` so that overlong
    unbroken tokens (hashes, paths) cannot overflow ``multi_cell``.
    """
    printable_width = pdf.w - pdf.l_margin - pdf.r_margin
    pdf.set_x(pdf.l_margin)
    pdf.set_font("Helvetica", size=10)
    wrapped = _soft_wrap_tokens(text)
    pdf.multi_cell(printable_width, 5, txt=wrapped)
|
|
|
|
|
|
def _section(pdf: FPDF, title: str) -> None:
    """Render a bold section heading spanning the printable page width."""
    pdf.ln(2)
    pdf.set_x(pdf.l_margin)
    pdf.set_font("Helvetica", style="B", size=12)
    printable_width = pdf.w - pdf.l_margin - pdf.r_margin
    pdf.cell(printable_width, 7, txt=title, ln=1)
|
|
|
|
|
|
def _soft_wrap_tokens(text: str, chunk: int = 40) -> str:
|
|
parts = []
|
|
for token in str(text).split(" "):
|
|
if len(token) <= chunk:
|
|
parts.append(token)
|
|
continue
|
|
segments = [token[i : i + chunk] for i in range(0, len(token), chunk)]
|
|
parts.append(" ".join(segments))
|
|
return " ".join(parts)
|
|
|
|
|
|
def _iter_output_rows(job: AuroraJob) -> Iterable[str]:
|
|
if not job.result:
|
|
return []
|
|
for item in job.result.output_files:
|
|
yield f"[{item.type}] {item.name} | {item.hash}"
|
|
|
|
|
|
def _render_header(pdf: FPDF) -> None:
    """Render the report title banner."""
    pdf.set_font("Helvetica", style="B", size=16)
    pdf.cell(0, 10, txt="Aurora Forensic Report", ln=1)
    pdf.set_font("Helvetica", size=9)
    pdf.cell(0, 5, txt="Autonomous Media Forensics Agent", ln=1)
    pdf.ln(3)


def _render_case_summary(pdf: FPDF, job: AuroraJob) -> None:
    """Render the per-field case summary section."""
    _section(pdf, "Case Summary")
    _line(pdf, f"Job ID: {job.job_id}")
    _line(pdf, f"Mode: {job.mode}")
    _line(pdf, f"Media Type: {job.media_type}")
    _line(pdf, f"Status: {job.status}")
    _line(pdf, f"Created At: {job.created_at}")
    _line(pdf, f"Started At: {job.started_at or '-'}")
    _line(pdf, f"Completed At: {job.completed_at or '-'}")
    _line(pdf, f"Input File: {job.file_name}")
    _line(pdf, f"Input Hash: {job.input_hash}")
    _line(pdf, f"Digital Signature: {job.result.digital_signature or '-'}")


def _render_processing_log(pdf: FPDF, job: AuroraJob) -> None:
    """Render the numbered pipeline steps, or a placeholder when empty."""
    _section(pdf, "Processing Log")
    if not job.result.processing_log:
        _line(pdf, "No processing steps were recorded.")
    for idx, step in enumerate(job.result.processing_log, start=1):
        _line(
            pdf,
            f"{idx}. {step.step} | agent={step.agent} | model={step.model} | time_ms={step.time_ms}",
        )


def _render_output_artifacts(pdf: FPDF, job: AuroraJob) -> None:
    """Render one row per output artifact, or a placeholder when empty."""
    _section(pdf, "Output Artifacts")
    rows = list(_iter_output_rows(job))
    if not rows:
        _line(pdf, "No output artifacts available.")
    for row in rows:
        _line(pdf, row)


def _render_metadata(pdf: FPDF, job: AuroraJob) -> None:
    """Render result metadata as ``key: value`` lines."""
    _section(pdf, "Metadata")
    # metadata may be None — fall back to an empty mapping.
    for key, value in (job.result.metadata or {}).items():
        _line(pdf, f"{key}: {value}")


def generate_forensic_report_pdf(job: AuroraJob, output_path: Path) -> Path:
    """Write an A4 PDF forensic report for *job* to *output_path*.

    Parent directories of *output_path* are created as needed. The report
    contains a title banner followed by Case Summary, Processing Log,
    Output Artifacts, and Metadata sections.

    Args:
        job: Completed Aurora job; must carry a populated ``result``.
        output_path: Destination file path for the generated PDF.

    Returns:
        *output_path*, for call chaining.

    Raises:
        RuntimeError: If the job has no result data to report on.
    """
    if not job.result:
        raise RuntimeError("Job has no result data")

    output_path.parent.mkdir(parents=True, exist_ok=True)
    pdf = FPDF(unit="mm", format="A4")
    pdf.set_auto_page_break(auto=True, margin=14)
    pdf.add_page()

    _render_header(pdf)
    _render_case_summary(pdf, job)
    _render_processing_log(pdf, job)
    _render_output_artifacts(pdf, job)
    _render_metadata(pdf, job)

    pdf.output(str(output_path))
    return output_path
|