New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
169 lines
6.7 KiB
Python
"""
|
|
Tests for incident_followups_summary action and followup event schema.
|
|
"""
|
|
import json
import os
import sys
import tempfile
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import patch
|
|
|
|
# Make the router service importable as top-level modules (e.g. ``incident_store``)
# no matter which directory pytest is invoked from.
ROOT = Path(__file__).resolve().parent.parent
ROUTER = ROOT / "services" / "router"
if str(ROUTER) not in sys.path:
    sys.path.insert(0, str(ROUTER))
|
|
|
|
|
|
class TestFollowupSummary:
    """Tests for oncall_tool incident_followups_summary using MemoryIncidentStore.

    Each test runs against a fresh in-memory incident store that is installed
    as the process-wide store in ``setup_method`` and detached again in
    ``teardown_method`` so tests cannot leak state into each other.
    """

    def setup_method(self):
        """Install a fresh MemoryIncidentStore as the global incident store."""
        from incident_store import MemoryIncidentStore, set_incident_store
        self.store = MemoryIncidentStore()
        set_incident_store(self.store)

    def teardown_method(self):
        """Detach the global incident store so later tests start clean."""
        from incident_store import set_incident_store
        set_incident_store(None)

    def _create_incident(self, service="gateway", severity="P1", status="open"):
        """Create and return a minimal incident record in the store.

        NOTE(review): ``status`` is accepted for call-site readability but is
        not forwarded in the payload — new incidents presumably default to
        "open"; confirm against MemoryIncidentStore.create_incident.
        """
        return self.store.create_incident({
            "service": service,
            "severity": severity,
            "title": f"Test {severity} incident",
            "started_at": datetime.utcnow().isoformat(),
        })

    def _add_followup(self, incident_id, title="Fix config", priority="P1",
                      due_date=None, status="open"):
        """Append a ``followup`` event to *incident_id*.

        When ``due_date`` is omitted the follow-up is dated one day in the
        past, i.e. it is already overdue.
        """
        if due_date is None:
            due_date = (datetime.utcnow() - timedelta(days=1)).isoformat()
        self.store.append_event(
            incident_id,
            "followup",
            title,
            meta={
                "title": title,
                "owner": "sofiia",
                "priority": priority,
                "due_date": due_date,
                "status": status,
            },
        )

    def test_open_p1_incident_appears_in_summary(self):
        inc = self._create_incident(severity="P1", status="open")
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["open_incidents"] >= 1
        assert any(i["id"] == inc["id"] for i in summary["open_incidents"])

    def test_p3_incident_not_in_critical(self):
        # Only P0/P1 incidents count as "open" in the summary.
        self._create_incident(severity="P3", status="open")
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["open_incidents"] == 0

    def test_closed_incident_not_in_open(self):
        inc = self._create_incident(severity="P1", status="open")
        self.store.close_incident(inc["id"], datetime.utcnow().isoformat(), "Fixed")
        summary = self._get_summary(service="gateway")
        assert not any(i["id"] == inc["id"] for i in summary["open_incidents"])

    def test_overdue_followup_detected(self):
        inc = self._create_incident()
        yesterday = (datetime.utcnow() - timedelta(days=1)).isoformat()
        self._add_followup(inc["id"], title="Upgrade deps", due_date=yesterday)
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["overdue"] >= 1
        assert any(f["title"] == "Upgrade deps" for f in summary["overdue_followups"])

    def test_future_followup_not_overdue(self):
        inc = self._create_incident()
        future = (datetime.utcnow() + timedelta(days=7)).isoformat()
        self._add_followup(inc["id"], title="Future task", due_date=future)
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["overdue"] == 0

    def test_done_followup_not_overdue(self):
        # A follow-up with status "done" never counts as overdue, even if
        # its due date has passed.
        inc = self._create_incident()
        yesterday = (datetime.utcnow() - timedelta(days=1)).isoformat()
        self._add_followup(inc["id"], title="Done task", due_date=yesterday, status="done")
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["overdue"] == 0

    def test_total_open_followups_counted(self):
        inc = self._create_incident()
        future = (datetime.utcnow() + timedelta(days=7)).isoformat()
        self._add_followup(inc["id"], title="Task A", due_date=future)
        self._add_followup(inc["id"], title="Task B", due_date=future)
        self._add_followup(inc["id"], title="Task C done", due_date=future, status="done")
        summary = self._get_summary(service="gateway")
        assert summary["stats"]["total_open_followups"] >= 2

    def test_filter_by_env(self):
        self._create_incident(service="gateway", severity="P1")
        summary_any = self._get_summary(service="gateway", env="any")
        assert summary_any["stats"]["open_incidents"] >= 1

    def _get_summary(self, service="gateway", env="any", window_days=30):
        """Helper: replicate the followups_summary logic directly via the store.

        Returns a dict with ``open_incidents`` (non-closed P0/P1 incidents),
        ``overdue_followups`` (open follow-ups past their due date) and a
        ``stats`` roll-up.  ``env`` is accepted for signature parity with the
        action but is not used for filtering here.
        """
        incidents = self.store.list_incidents(
            {"service": service} if service else {},
            limit=100,
        )
        now_dt = datetime.utcnow()
        now_iso = now_dt.isoformat()
        # Restrict to incidents created inside the look-back window.  The
        # comparison is on ISO-8601 strings, which orders correctly for the
        # uniform utcnow().isoformat() timestamps used by these tests.
        if window_days > 0:
            cutoff_iso = (now_dt - timedelta(days=window_days)).isoformat()
            incidents = [i for i in incidents
                         if i.get("created_at", "") >= cutoff_iso]

        # Critical (P0/P1) incidents that are not fully closed.  "resolved"
        # incidents are still listed; only closed ones drop out.
        open_critical = [
            {"id": i["id"], "severity": i.get("severity"), "status": i.get("status"),
             "started_at": i.get("started_at"), "title": i.get("title", "")[:200]}
            for i in incidents
            if i.get("status") in ("open", "mitigating", "resolved")
            and i.get("severity") in ("P0", "P1")
        ]

        # Single pass over each incident's events: collect overdue open
        # follow-ups and count all open follow-ups at the same time.  (The
        # previous version fetched the events twice per incident, and the
        # second pass would have raised AttributeError on string-encoded
        # meta, which this pass already knows how to decode.)
        overdue = []
        total_open = 0
        for inc in incidents:
            for ev in self.store.get_events(inc["id"], limit=200):
                if ev.get("type") != "followup":
                    continue
                meta = ev.get("meta") or {}
                if isinstance(meta, str):
                    # Persistent stores may serialize meta as JSON text.
                    try:
                        meta = json.loads(meta)
                    except Exception:
                        meta = {}
                if meta.get("status", "open") != "open":
                    continue
                total_open += 1
                due = meta.get("due_date", "")
                if due and due < now_iso:
                    overdue.append({
                        "incident_id": inc["id"],
                        # NOTE(review): the [:200] truncation applies only to
                        # the message fallback, not to meta["title"] — kept
                        # as-is to mirror the production logic.
                        "title": meta.get("title", ev.get("message", "")[:200]),
                        "due_date": due,
                        "priority": meta.get("priority", "P2"),
                        "owner": meta.get("owner", ""),
                    })

        return {
            "open_incidents": open_critical[:20],
            "overdue_followups": overdue[:30],
            "stats": {
                "open_incidents": len(open_critical),
                "overdue": len(overdue),
                "total_open_followups": total_open,
            },
        }
|