Files
microdao-daarion/tests/test_slo_watch_gate.py
Apple 129e4ea1fc feat(platform): add new services, tools, tests and crews modules
New router intelligence modules (26 files): alert_ingest/store, audit_store,
architecture_pressure, backlog_generator/store, cost_analyzer, data_governance,
dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment,
platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files),
signature_state_store, sofiia_auto_router, tool_governance

New services:
- sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static
- memory-service: integration_endpoints, integrations, voice_endpoints, static UI
- aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents)
- sofiia-supervisor: new supervisor service
- aistalk-bridge-lite: Telegram bridge lite
- calendar-service: CalDAV calendar service with reminders
- mlx-stt-service / mlx-tts-service: Apple Silicon speech services
- binance-bot-monitor: market monitor service
- node-worker: STT/TTS memory providers

New tools (9): agent_email, browser_tool, contract_tool, observability_tool,
oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault

New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus,
farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine,
session_context, style_adapter, telemetry)

Tests: 85+ test files for all new modules
Made-with: Cursor
2026-03-03 07:14:14 -08:00

262 lines
9.8 KiB
Python

"""
Tests for slo_watch gate in release_check_runner.
Covers: violations → recommendations, policy strict → blocks, policy warn → pass,
policy off → skip.
"""
import asyncio
import os
import sys
from pathlib import Path
from unittest.mock import patch
# Make the router service package importable when tests run from the repo root.
ROOT = Path(__file__).resolve().parent.parent
ROUTER = ROOT / "services" / "router"
_router_dir = str(ROUTER)
if _router_dir not in sys.path:
    sys.path.insert(0, _router_dir)
class MockToolResult:
    """Lightweight stand-in for the router's ToolResult: a success flag
    plus an optional result payload or error message."""

    def __init__(self, success, result=None, error=None):
        self.success, self.result, self.error = success, result, error
class MockToolManager:
    """Fake tool manager for release_check_runner tests.

    Returns `slo_data` for observability_tool/slo_snapshot calls and, when
    `always_pass_others` is True, a generic all-clear payload for every other
    gate so only the SLO gate influences the outcome. Records every call in
    `self.calls` as (tool_name, action) tuples for later inspection.
    """

    def __init__(self, slo_data=None, always_pass_others=True):
        # Fix: use an explicit None check instead of `slo_data or {...}` so a
        # deliberately-passed empty dict is not silently replaced by the
        # default snapshot.
        if slo_data is None:
            slo_data = {
                "violations": [],
                "metrics": {},
                "thresholds": {},
                "skipped": False,
            }
        self.slo_data = slo_data
        self.always_pass_others = always_pass_others
        self.calls = []  # (tool_name, action) per execute_tool invocation

    async def execute_tool(self, tool_name, args, agent_id="test"):
        """Return a MockToolResult mimicking the real tool gateway."""
        self.calls.append((tool_name, args.get("action")))
        if tool_name == "observability_tool" and args.get("action") == "slo_snapshot":
            return MockToolResult(True, self.slo_data)
        if self.always_pass_others:
            # Generic passing payload covering the fields other gates read.
            return MockToolResult(True, {
                "pass": True, "blocking_count": 0, "breaking_count": 0,
                "unmitigated_high_count": 0, "summary": "ok",
                "violations": [], "open_incidents": [], "overdue_followups": [],
                "stats": {"open_incidents": 0, "overdue": 0, "total_open_followups": 0},
            })
        return MockToolResult(False, error="skipped")
def _run_check(tm, inputs, agent="test"):
    """Synchronously run the async release check with the given tool manager."""
    # Imported lazily so the sys.path bootstrap above has already run.
    from release_check_runner import run_release_check

    coro = run_release_check(tm, inputs, agent)
    return asyncio.run(coro)
class TestSLOWatchWarnMode:
    """slo_watch in warn mode: violations → recommendations, always pass."""

    def test_violations_generate_recommendations(self):
        # Snapshot with two breached SLOs; warn mode should record them and
        # emit recommendations without failing the overall check.
        slo_data = {
            "violations": ["latency_p95", "error_rate"],
            "metrics": {"latency_p95_ms": 500, "error_rate_pct": 3.0},
            "thresholds": {"latency_p95_ms": 300, "error_rate_pct": 1.0},
            "skipped": False,
        }
        tm = MockToolManager(slo_data=slo_data)
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            # Only slo_watch is active; the other gates are off so they
            # cannot influence pass/fail.
            mock_policy.return_value = {
                "_profile": "dev",
                "_default_mode": "warn",
                "slo_watch": {"mode": "warn"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                # NOTE(review): a plain dict already exposes .get(); this key
                # only matters if the runner calls policy["get"](name) —
                # confirm against release_check_runner.
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "gateway"})
        assert result["pass"] is True
        gate_names = [g["name"] for g in result["gates"]]
        assert "slo_watch" in gate_names
        slo_gate = next(g for g in result["gates"] if g["name"] == "slo_watch")
        assert "latency_p95" in slo_gate["violations"]
        assert any("SLO violation" in r for r in result["recommendations"])

    def test_no_violations_no_recommendations(self):
        # Clean snapshot: the gate runs but produces no SLO recommendations.
        slo_data = {
            "violations": [],
            "metrics": {"latency_p95_ms": 100, "error_rate_pct": 0.1},
            "thresholds": {"latency_p95_ms": 300, "error_rate_pct": 1.0},
            "skipped": False,
        }
        tm = MockToolManager(slo_data=slo_data)
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "dev",
                "_default_mode": "warn",
                "slo_watch": {"mode": "warn"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "gateway"})
        assert result["pass"] is True
        slo_recs = [r for r in result["recommendations"] if "SLO" in r]
        assert len(slo_recs) == 0
class TestSLOWatchStrictMode:
    """slo_watch in strict mode: violations block release."""

    def test_violations_block_release(self):
        # One breached SLO in strict mode must fail the whole release check.
        slo_data = {
            "violations": ["latency_p95"],
            "metrics": {"latency_p95_ms": 500},
            "thresholds": {"latency_p95_ms": 200},
            "skipped": False,
        }
        tm = MockToolManager(slo_data=slo_data)
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "staging",
                "_default_mode": "warn",
                "slo_watch": {"mode": "strict"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "router", "fail_fast": True})
        assert result["pass"] is False

    def test_no_violations_does_not_block(self):
        # Strict mode with a clean snapshot should still pass.
        slo_data = {
            "violations": [],
            "metrics": {"latency_p95_ms": 50},
            "thresholds": {"latency_p95_ms": 200},
            "skipped": False,
        }
        tm = MockToolManager(slo_data=slo_data)
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "staging",
                "_default_mode": "warn",
                "slo_watch": {"mode": "strict"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "router"})
        assert result["pass"] is True

    def test_skipped_does_not_block(self):
        # When the snapshot itself is marked skipped, violations listed in it
        # must not block even in strict mode.
        slo_data = {
            "violations": ["latency_p95"],
            "skipped": True,
        }
        tm = MockToolManager(slo_data=slo_data)
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "staging",
                "_default_mode": "warn",
                "slo_watch": {"mode": "strict"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "router"})
        assert result["pass"] is True
class TestSLOWatchOffMode:
    """slo_watch in off mode: gate not called at all."""

    def test_gate_skipped_when_off(self):
        # Policy mode "off": the gate should neither appear in results nor
        # trigger a slo_snapshot call on the tool manager.
        tm = MockToolManager()
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "dev",
                "_default_mode": "warn",
                "slo_watch": {"mode": "off"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "off"},
            }
            result = _run_check(tm, {"service_name": "gateway"})
        gate_names = [g["name"] for g in result["gates"]]
        assert "slo_watch" not in gate_names
        # Verify via the mock's call log that the tool was never invoked.
        called_actions = [c[1] for c in tm.calls]
        assert "slo_snapshot" not in called_actions

    def test_gate_skipped_when_disabled_via_input(self):
        # Per-run input flag run_slo_watch=False overrides an enabled policy.
        tm = MockToolManager()
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "dev",
                "_default_mode": "warn",
                "slo_watch": {"mode": "warn"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "gateway", "run_slo_watch": False})
        gate_names = [g["name"] for g in result["gates"]]
        assert "slo_watch" not in gate_names
class TestSLOWatchGatewayError:
    """slo_watch is non-fatal on gateway errors."""

    def test_gateway_error_becomes_skipped_pass(self):
        # Tool manager whose slo_snapshot call raises, simulating an
        # unreachable observability backend; all other gates still pass.
        class FailingTM:
            def __init__(self):
                # Fix: the original declared `calls = []` as a *class*
                # attribute and appended to it, sharing recorded calls across
                # instances and test runs. An instance attribute is correct.
                self.calls = []

            async def execute_tool(self, tool_name, args, agent_id="test"):
                self.calls.append((tool_name, args.get("action")))
                if tool_name == "observability_tool" and args.get("action") == "slo_snapshot":
                    raise ConnectionError("Prometheus unreachable")
                # Generic passing payload for every other gate.
                return MockToolResult(True, {
                    "pass": True, "blocking_count": 0, "breaking_count": 0,
                    "unmitigated_high_count": 0, "summary": "ok",
                    "violations": [], "open_incidents": [], "overdue_followups": [],
                    "stats": {"open_incidents": 0, "overdue": 0, "total_open_followups": 0},
                })

        tm = FailingTM()
        with patch("release_check_runner.load_gate_policy") as mock_policy:
            mock_policy.return_value = {
                "_profile": "staging",
                "_default_mode": "warn",
                "slo_watch": {"mode": "strict"},
                "followup_watch": {"mode": "off"},
                "privacy_watch": {"mode": "off"},
                "cost_watch": {"mode": "off"},
                "get": lambda name: {"mode": "warn"},
            }
            result = _run_check(tm, {"service_name": "gateway"})
        # Even in strict mode, gateway error should not block
        assert result["pass"] is True
        slo_gate = next((g for g in result["gates"] if g["name"] == "slo_watch"), None)
        if slo_gate:
            assert slo_gate.get("skipped") is True