New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
158 lines
5.2 KiB
Python
158 lines
5.2 KiB
Python
"""
|
|
Sofiia Supervisor — State Backend
|
|
|
|
Supports:
|
|
- redis: production (requires redis-py)
|
|
- memory: in-process dict (testing / single-instance dev)
|
|
|
|
Redis schema:
|
|
run:{run_id} → JSON (RunRecord without events)
|
|
run:{run_id}:events → Redis list of JSON RunEvent
|
|
TTL: RUN_TTL_SEC (default 24h)
|
|
"""
|
|
|
|
from __future__ import annotations
|
|
|
|
import json
|
|
import logging
|
|
from abc import ABC, abstractmethod
|
|
from typing import List, Optional
|
|
|
|
from .config import settings
|
|
from .models import RunEvent, RunRecord, RunStatus
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
class StateBackend(ABC):
    """Abstract contract for persisting supervisor runs and their event streams."""

    @abstractmethod
    async def save_run(self, run: RunRecord) -> None:
        """Persist (create or overwrite) a run record."""

    @abstractmethod
    async def get_run(self, run_id: str) -> Optional[RunRecord]:
        """Return the run identified by *run_id*, or ``None`` if unknown."""

    @abstractmethod
    async def append_event(self, run_id: str, event: RunEvent) -> None:
        """Append one event to the run's ordered event stream."""

    @abstractmethod
    async def get_events(self, run_id: str) -> List[RunEvent]:
        """Return all events recorded for *run_id*, oldest first."""

    @abstractmethod
    async def cancel_run(self, run_id: str) -> bool:
        """Attempt to cancel a run; ``True`` if the status was changed."""
|
|
|
|
|
|
# ─── In-memory backend (testing/dev) ─────────────────────────────────────────
|
|
|
|
class MemoryStateBackend(StateBackend):
    """Volatile backend keeping runs and events in process-local dicts.

    Intended for tests and single-instance development only: nothing
    survives a restart, and state is not shared across processes.
    """

    def __init__(self):
        # run_id → record, and run_id → ordered list of events
        self._runs: dict[str, RunRecord] = {}
        self._events: dict[str, list[RunEvent]] = {}

    async def save_run(self, run: RunRecord) -> None:
        """Store *run* under its id, replacing any previous record."""
        self._runs[run.run_id] = run

    async def get_run(self, run_id: str) -> Optional[RunRecord]:
        """Look up a run; ``None`` when the id was never saved."""
        return self._runs.get(run_id)

    async def append_event(self, run_id: str, event: RunEvent) -> None:
        """Add *event* to the run's stream, creating the stream on first use."""
        stream = self._events.setdefault(run_id, [])
        stream.append(event)

    async def get_events(self, run_id: str) -> List[RunEvent]:
        """Return a defensive copy of the run's event list (empty if none)."""
        return list(self._events.get(run_id, []))

    async def cancel_run(self, run_id: str) -> bool:
        """Mark a live run as cancelled.

        Returns ``False`` for unknown runs or runs already in a terminal
        state (succeeded / failed / cancelled).
        """
        run = self._runs.get(run_id)
        if not run:
            return False
        terminal = (RunStatus.SUCCEEDED, RunStatus.FAILED, RunStatus.CANCELLED)
        if run.status in terminal:
            return False
        run.status = RunStatus.CANCELLED
        return True
|
|
|
|
|
|
# ─── Redis backend (production) ──────────────────────────────────────────────
|
|
|
|
class RedisStateBackend(StateBackend):
    """Redis-backed state store (production).

    Schema:
        run:{run_id}         → JSON-serialised RunRecord without events
        run:{run_id}:events  → Redis list of JSON-serialised RunEvent
    Both keys expire after ``settings.RUN_TTL_SEC`` seconds.
    """

    def __init__(self):
        # Lazily-created shared async client; see _client().
        self._redis = None

    async def _client(self):
        """Return the shared async Redis client, connecting on first use.

        The import is deferred so redis-py is only required when this
        backend is actually selected.  Raises on connection failure.
        """
        if self._redis is None:
            try:
                import redis.asyncio as aioredis

                # Awaiting the client triggers redis-py's lazy initialisation.
                self._redis = await aioredis.from_url(
                    settings.REDIS_URL,
                    decode_responses=True,
                )
            except Exception as e:
                # Lazy %-style args: no string formatting unless the
                # record is actually emitted.
                logger.error("Redis connection error: %s", e)
                raise
        return self._redis

    def _run_key(self, run_id: str) -> str:
        """Key holding the run record (without events)."""
        return f"run:{run_id}"

    def _events_key(self, run_id: str) -> str:
        """Key holding the run's ordered event list."""
        return f"run:{run_id}:events"

    async def save_run(self, run: RunRecord) -> None:
        """Persist *run* with TTL; events are excluded (stored in their own key)."""
        r = await self._client()
        # Store run without events (events stored separately in list)
        data = run.model_dump(exclude={"events"})
        await r.setex(
            self._run_key(run.run_id),
            settings.RUN_TTL_SEC,
            json.dumps(data, default=str),
        )

    async def get_run(self, run_id: str) -> Optional[RunRecord]:
        """Load a run and re-attach its events.

        Returns ``None`` when the key is missing or the stored payload
        cannot be deserialised (corrupt data is treated as absent, not fatal).
        """
        r = await self._client()
        raw = await r.get(self._run_key(run_id))
        if not raw:
            return None
        try:
            data = json.loads(raw)
            events = await self.get_events(run_id)
            data["events"] = [e.model_dump() for e in events]
            return RunRecord(**data)
        except Exception as e:
            logger.error("Deserialise run %s: %s", run_id, e)
            return None

    async def append_event(self, run_id: str, event: RunEvent) -> None:
        """Append *event* to the run's list and refresh the list's TTL."""
        r = await self._client()
        key = self._events_key(run_id)
        await r.rpush(key, json.dumps(event.model_dump(), default=str))
        await r.expire(key, settings.RUN_TTL_SEC)

    async def get_events(self, run_id: str) -> List[RunEvent]:
        """Return all decodable events for *run_id*, oldest first.

        Entries that fail to decode are skipped (best-effort), but logged
        so data corruption is visible in diagnostics instead of silently
        dropped as before.
        """
        r = await self._client()
        raw_list = await r.lrange(self._events_key(run_id), 0, -1)
        events: List[RunEvent] = []
        for raw in raw_list:
            try:
                events.append(RunEvent(**json.loads(raw)))
            except Exception as e:
                # Previously a bare `pass` — keep the skip, surface the loss.
                logger.warning("Skipping undecodable event for run %s: %s", run_id, e)
        return events

    async def cancel_run(self, run_id: str) -> bool:
        """Cancel a live run and persist the new status.

        Returns ``False`` for unknown runs or runs already in a terminal
        state (succeeded / failed / cancelled).
        """
        run = await self.get_run(run_id)
        if not run:
            return False
        if run.status in (RunStatus.SUCCEEDED, RunStatus.FAILED, RunStatus.CANCELLED):
            return False
        run.status = RunStatus.CANCELLED
        await self.save_run(run)
        return True
|
|
|
|
|
|
# ─── Factory ─────────────────────────────────────────────────────────────────
|
|
|
|
def create_state_backend() -> StateBackend:
    """Build the state backend selected by ``settings.STATE_BACKEND``.

    ``"redis"`` selects the production Redis backend; any other value
    falls back to the in-process memory backend.
    """
    choice = settings.STATE_BACKEND
    if choice == "redis":
        logger.info("Using Redis state backend")
        return RedisStateBackend()
    logger.info("Using in-memory state backend")
    return MemoryStateBackend()
|