New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
127 lines
4.4 KiB
Python
"""
tests/test_risk_dashboard.py — Tests for compute_risk_dashboard.

Validates:
- Top-N sorting by score desc
- Band count aggregation
- Critical P0 service detection
- Env filtering passed through
"""
|
|
import pytest
|
|
import sys
|
|
from pathlib import Path
|
|
|
|
sys.path.insert(0, str(Path(__file__).resolve().parent.parent / "services" / "router"))
|
|
|
|
from risk_engine import _builtin_defaults, compute_risk_dashboard, _reload_policy
|
|
|
|
|
|
@pytest.fixture(autouse=True)
def reset_policy_cache():
    """Reload the risk policy cache around every test.

    autouse=True so no test can observe policy state cached (or mutated)
    by an earlier test: the cache is reloaded before the test body runs
    and again during fixture teardown.
    """
    _reload_policy()  # pre-test: drop anything a previous test cached
    yield
    _reload_policy()  # teardown: leave a freshly loaded cache behind
|
|
|
|
|
|
@pytest.fixture
def policy():
    """Supply the built-in default risk policy to each test that asks for it."""
    defaults = _builtin_defaults()
    return defaults
|
|
|
|
|
|
def _make_report(service, score, band=None, env="prod"):
    """Build a minimal service risk report dict for dashboard tests.

    When *band* is not supplied, it is derived from *score* via
    score_to_band using the built-in default policy.
    """
    # Imported here (not at module top) to keep the sys.path bootstrap
    # above in control of where risk_engine is resolved from.
    from risk_engine import score_to_band

    defaults = _builtin_defaults()
    resolved_band = band or score_to_band(score, defaults)
    report = {
        "service": service,
        "env": env,
        "score": score,
        "band": resolved_band,
        "thresholds": {"warn_at": 50, "fail_at": 80},
        "components": {},
        "reasons": [],
        "recommendations": [],
        "updated_at": "2026-02-23T00:00:00",
    }
    return report
|
|
|
|
|
|
class TestDashboardSorting:
    """Ordering, truncation, and empty-input handling of the service list."""

    def test_sorted_desc_by_score(self, policy):
        inputs = [
            _make_report(name, value)
            for name, value in (("a", 30), ("b", 90), ("c", 10), ("d", 55))
        ]
        dash = compute_risk_dashboard("prod", top_n=10, service_reports=inputs, policy=policy)
        observed = [entry["score"] for entry in dash["services"]]
        assert observed == sorted(observed, reverse=True)

    def test_top_n_limits_results(self, policy):
        inputs = [_make_report(f"svc{idx}", idx * 5) for idx in range(15)]
        dash = compute_risk_dashboard("prod", top_n=5, service_reports=inputs, policy=policy)
        assert len(dash["services"]) == 5

    def test_top_n_returns_highest(self, policy):
        # Scores run 0..45 in steps of 5, so the top three are 35/40/45.
        inputs = [_make_report(f"svc{idx}", idx * 5) for idx in range(10)]
        dash = compute_risk_dashboard("prod", top_n=3, service_reports=inputs, policy=policy)
        assert all(entry["score"] >= 30 for entry in dash["services"])

    def test_empty_service_reports(self, policy):
        dash = compute_risk_dashboard("prod", top_n=10, service_reports=[], policy=policy)
        assert dash["services"] == []
        assert dash["total_services"] == 0
|
|
|
|
|
|
class TestBandCounts:
    """Aggregation of per-band service counts."""

    def test_band_counts_correct(self, policy):
        # (service, score, explicit band) — two criticals, one of each other band.
        fixtures = (
            ("a", 0, "low"),
            ("b", 21, "medium"),
            ("c", 51, "high"),
            ("d", 81, "critical"),
            ("e", 85, "critical"),
        )
        inputs = [_make_report(name, value, band=label) for name, value, label in fixtures]
        dash = compute_risk_dashboard("prod", top_n=10, service_reports=inputs, policy=policy)
        counts = dash["band_counts"]
        assert counts["low"] == 1
        assert counts["medium"] == 1
        assert counts["high"] == 1
        assert counts["critical"] == 2
|
|
|
|
|
|
class TestP0Detection:
    """Flagging of P0 services that sit in a critical or high risk band."""

    def test_critical_p0_services_detected(self, policy):
        """gateway and router are p0_services. critical/high band → flagged."""
        inputs = [
            _make_report("gateway", 85, band="critical"),         # p0 + critical
            _make_report("router", 60, band="high"),              # p0 + high
            _make_report("memory-service", 90, band="critical"),  # critical but not p0
        ]
        dash = compute_risk_dashboard("prod", top_n=10, service_reports=inputs, policy=policy)
        flagged = dash["critical_p0_services"]
        assert "gateway" in flagged
        assert "router" in flagged
        assert "memory-service" not in flagged

    def test_low_band_p0_not_flagged(self, policy):
        dash = compute_risk_dashboard(
            "prod",
            top_n=10,
            service_reports=[_make_report("gateway", 10, band="low")],
            policy=policy,
        )
        assert "gateway" not in dash["critical_p0_services"]
|
|
|
|
|
|
class TestDashboardMetadata:
    """Dashboard envelope fields: env passthrough, timestamp, totals."""

    def test_env_passed_through(self, policy):
        dash = compute_risk_dashboard("staging", top_n=5, service_reports=[], policy=policy)
        assert dash["env"] == "staging"

    def test_generated_at_present(self, policy):
        # top_n deliberately omitted here — exercises its default.
        dash = compute_risk_dashboard("prod", service_reports=[], policy=policy)
        assert "generated_at" in dash
        assert dash["generated_at"]

    def test_total_services_count(self, policy):
        inputs = [_make_report(f"s{idx}", idx * 10) for idx in range(4)]
        dash = compute_risk_dashboard("prod", top_n=10, service_reports=inputs, policy=policy)
        assert dash["total_services"] == 4