feat(matrix-bridge-dagi): M4–M11 + soak infrastructure (debug inject endpoint)
Includes all milestones M4 through M11:
- M4: agent discovery (!agents / !status)
- M5: node-aware routing + per-node observability
- M6: dynamic policy store (node/agent overrides, import/export)
- M7: Prometheus alerts + Grafana dashboard + metrics contract
- M8: node health tracker + soft failover + sticky cache + HA persistence
- M9: two-step confirm + diff preview for dangerous commands
- M10: auto-backup, restore, retention, policy history + change detail
- M11: soak scenarios (CI tests) + live soak script

Soak infrastructure (this commit):
- POST /v1/debug/inject_event (guarded by DEBUG_INJECT_ENABLED, default: false)
- _preflight_inject() and _check_wal() in soak script
- --db-path arg for WAL delta reporting
- Runbook sections 2a/2b/2c: Step 0 and Step 1 exact commands

Made-with: Cursor
This commit is contained in:
@@ -1,5 +1,5 @@
|
||||
"""
|
||||
matrix-bridge-dagi — configuration and validation (M2.1 + M2.2 + M3.0)
|
||||
matrix-bridge-dagi — configuration and validation (M2.1 + M2.2 + M3.0 + M3.1)
|
||||
"""
|
||||
import os
|
||||
from dataclasses import dataclass, field
|
||||
@@ -54,6 +54,54 @@ class BridgeConfig:
|
||||
# "ignore" | "reply_error" (send ⛔ to room on unauthorized attempt)
|
||||
control_unauthorized_behavior: str
|
||||
|
||||
# M3.1: Runbook runner — sofiia-console control token
|
||||
sofiia_control_token: str # X-Control-Token for /api/runbooks/internal/runs
|
||||
|
||||
# M3.4: Control channel safety — rate limiting + cooldown
|
||||
control_room_rpm: int # Max commands per room per minute (0 = unlimited)
|
||||
control_operator_rpm: int # Max commands per operator per minute
|
||||
control_run_next_rpm: int # Max !runbook next calls per run_id per minute
|
||||
control_cooldown_s: float # Anti-double-click debounce per (operator, verb, subcmd)
|
||||
|
||||
# M2.3: Persistent event deduplication
|
||||
persistent_dedupe: bool # Enable SQLite-backed dedupe across restarts
|
||||
bridge_data_dir: str # Directory for SQLite DB and other bridge data
|
||||
processed_events_ttl_h: int # TTL for processed events (hours)
|
||||
processed_events_prune_batch: int # Max rows to prune per prune run
|
||||
processed_events_prune_interval_s: int # Prune interval in seconds (0 = disable periodic)
|
||||
|
||||
# M4.0: agent discovery
|
||||
discovery_rpm: int # Max !agents replies per room per minute (0 = unlimited)
|
||||
|
||||
# M5.0: node-aware routing
|
||||
bridge_allowed_nodes: str # Comma-separated: "NODA1,NODA2"
|
||||
bridge_default_node: str # Default node when none specified
|
||||
bridge_room_node_map: str # Optional: "!roomA:server=NODA2;!roomB:server=NODA1"
|
||||
|
||||
# M8.0: node health + soft-failover thresholds
|
||||
node_fail_consecutive: int # Consecutive failures before node marked "down"
|
||||
node_lat_ewma_s: float # EWMA latency threshold (seconds) → "degraded"
|
||||
node_ewma_alpha: float # EWMA smoothing factor (0..1)
|
||||
|
||||
# M8.1: sticky failover cache
|
||||
failover_sticky_ttl_s: float # Seconds to hold sticky node preference after failover (0 = disabled)
|
||||
|
||||
# M8.2: HA state persistence
|
||||
ha_health_snapshot_interval_s: int # Seconds between node health writes to DB (0 = disabled)
|
||||
ha_health_max_age_s: int # Max age of health snapshot to load on startup (seconds)
|
||||
|
||||
# M9.0: Two-step confirmation TTL
|
||||
confirm_ttl_s: float # Seconds a pending !confirm nonce is valid (0 = disabled)
|
||||
|
||||
# M10.0: Policy export retention
|
||||
policy_export_retention_days: int # Days to keep policy exports (0 = keep forever)
|
||||
|
||||
# M10.2: Policy change history
|
||||
policy_history_limit: int # Max rows in policy_changes table (0 = unlimited)
|
||||
|
||||
# M11 soak: synthetic event injection (NEVER enable in production)
|
||||
debug_inject_enabled: bool # POST /v1/debug/inject_event (default: False)
|
||||
|
||||
# Service identity
|
||||
node_id: str
|
||||
build_sha: str
|
||||
@@ -99,6 +147,35 @@ def load_config() -> BridgeConfig:
|
||||
bridge_operator_allowlist=_optional("BRIDGE_OPERATOR_ALLOWLIST", ""),
|
||||
bridge_control_rooms=_optional("BRIDGE_CONTROL_ROOMS", ""),
|
||||
control_unauthorized_behavior=_optional("CONTROL_UNAUTHORIZED_BEHAVIOR", "ignore"),
|
||||
sofiia_control_token=_optional("SOFIIA_CONTROL_TOKEN", ""),
|
||||
control_room_rpm=max(0, int(_optional("CONTROL_ROOM_RPM", "60"))),
|
||||
control_operator_rpm=max(0, int(_optional("CONTROL_OPERATOR_RPM", "30"))),
|
||||
control_run_next_rpm=max(0, int(_optional("CONTROL_RUN_NEXT_RPM", "20"))),
|
||||
control_cooldown_s=max(0.0, float(_optional("CONTROL_COOLDOWN_S", "2.0"))),
|
||||
persistent_dedupe=_optional("PERSISTENT_DEDUPE", "1").strip() not in ("0", "false", ""),
|
||||
bridge_data_dir=_optional("BRIDGE_DATA_DIR", "/app/data"),
|
||||
processed_events_ttl_h=max(1, int(_optional("PROCESSED_EVENTS_TTL_H", "48"))),
|
||||
processed_events_prune_batch=max(1, int(_optional("PROCESSED_EVENTS_PRUNE_BATCH", "5000"))),
|
||||
processed_events_prune_interval_s=max(0, int(_optional("PROCESSED_EVENTS_PRUNE_INTERVAL_S", "3600"))),
|
||||
discovery_rpm=max(0, int(_optional("DISCOVERY_RPM", "20"))),
|
||||
bridge_allowed_nodes=_optional("BRIDGE_ALLOWED_NODES", "NODA1"),
|
||||
bridge_default_node=_optional("BRIDGE_DEFAULT_NODE", "NODA1"),
|
||||
bridge_room_node_map=_optional("BRIDGE_ROOM_NODE_MAP", ""),
|
||||
# M8.0: node health thresholds
|
||||
node_fail_consecutive=max(1, int(_optional("NODE_FAIL_CONSEC", "3"))),
|
||||
node_lat_ewma_s=max(0.5, float(_optional("NODE_LAT_EWMA_S", "12.0"))),
|
||||
node_ewma_alpha=min(1.0, max(0.01, float(_optional("NODE_EWMA_ALPHA", "0.3")))),
|
||||
# M8.1: sticky failover TTL (0 = disabled)
|
||||
failover_sticky_ttl_s=max(0.0, float(_optional("FAILOVER_STICKY_TTL_S", "300.0"))),
|
||||
# M8.2: HA state persistence
|
||||
ha_health_snapshot_interval_s=max(0, int(_optional("HA_HEALTH_SNAPSHOT_INTERVAL_S", "60"))),
|
||||
ha_health_max_age_s=max(0, int(_optional("HA_HEALTH_MAX_AGE_S", "600"))),
|
||||
# M9.0: Two-step confirmation TTL (0 = disabled)
|
||||
confirm_ttl_s=max(0.0, float(_optional("CONFIRM_TTL_S", "120.0"))),
|
||||
policy_export_retention_days=max(0, int(_optional("POLICY_EXPORT_RETENTION_DAYS", "30"))),
|
||||
policy_history_limit=max(0, int(_optional("POLICY_HISTORY_LIMIT", "100"))),
|
||||
debug_inject_enabled=_optional("DEBUG_INJECT_ENABLED", "false").lower()
|
||||
in ("1", "true", "yes"),
|
||||
node_id=_optional("NODE_ID", "NODA1"),
|
||||
build_sha=_optional("BUILD_SHA", "dev"),
|
||||
build_time=_optional("BUILD_TIME", "local"),
|
||||
|
||||
167
services/matrix-bridge-dagi/app/confirm_store.py
Normal file
167
services/matrix-bridge-dagi/app/confirm_store.py
Normal file
@@ -0,0 +1,167 @@
|
||||
"""
|
||||
confirm_store — M9.0: Two-step confirmation for dangerous control commands.
|
||||
|
||||
Flow:
|
||||
1. Operator issues a dangerous command (e.g. !node set, !policy import mode=replace).
|
||||
2. Bridge calls ConfirmStore.add(..., callback=<coroutine>) → returns a nonce.
|
||||
3. Bridge replies: "Type !confirm <nonce> within Ns to apply."
|
||||
4. Operator sends !confirm <nonce>.
|
||||
5. Bridge calls ConfirmStore.pop(nonce, sender_hash) → returns PendingConfirmation.
|
||||
6. Bridge executes callback() → (reply_text, diff_summary).
|
||||
7. Audit trail: matrix.control.intent / matrix.control.confirmed / matrix.control.applied.
|
||||
|
||||
Safety:
|
||||
- One pending entry per sender (new request replaces old).
|
||||
- Nonce is sender-bound: wrong sender_hash → pop returns None.
|
||||
- TTL enforced via monotonic time; expired entries not returned.
|
||||
- Nonce: 6 uppercase alphanumeric (NONCE_LEN chars from NONCE_CHARS).
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import secrets
|
||||
import string
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple
|
||||
|
||||
NONCE_LEN = 6
NONCE_CHARS = string.ascii_uppercase + string.digits

_DEFAULT_TTL_S = 120.0


def make_nonce() -> str:
    """Return a cryptographically random 6-char uppercase alphanumeric nonce."""
    # secrets (not random) — these nonces gate dangerous operations.
    picks = [secrets.choice(NONCE_CHARS) for _ in range(NONCE_LEN)]
    return "".join(picks)
|
||||
|
||||
|
||||
@dataclass
class PendingConfirmation:
    """A pending two-step confirmation waiting for !confirm <nonce>."""
    # 6-char uppercase alphanumeric token the operator must echo back.
    nonce: str
    # Hash of the operator who issued the intent; pop() is bound to it.
    sender_hash: str
    verb: str  # e.g. "node.set", "room.agents set", "policy.import"
    normalized_args: str  # human-readable args for audit
    action_summary: str  # "!node set room=!x:s node=NODA2"
    room_id: str  # Matrix room_id where the intent was issued
    callback: Callable[[], Awaitable[Tuple[str, str]]]  # async () → (reply_text, diff_summary)
    expires_at: float  # time.monotonic() deadline
|
||||
|
||||
|
||||
class ConfirmStore:
    """
    Thread-safe, in-memory registry of pending two-step confirmations.

    Each sender may hold at most one pending entry at a time; issuing a new
    dangerous command before confirming the previous one silently replaces it
    (the earlier nonce stops validating immediately).
    """

    def __init__(self, ttl_s: float = _DEFAULT_TTL_S) -> None:
        self.ttl_s = ttl_s
        self._lock = threading.RLock()
        # Two views kept consistent under the lock:
        self._by_nonce: Dict[str, PendingConfirmation] = {}
        self._by_sender: Dict[str, str] = {}  # sender_hash → nonce

    # ── Public API ────────────────────────────────────────────────────────────

    def add(
        self,
        sender_hash: str,
        verb: str,
        normalized_args: str,
        action_summary: str,
        room_id: str,
        callback: Callable[[], Awaitable[Tuple[str, str]]],
    ) -> str:
        """
        Register a pending confirmation and return its nonce.

        Any earlier pending entry for the same sender is discarded, so its
        old nonce immediately stops validating.
        """
        fresh = PendingConfirmation(
            nonce=make_nonce(),
            sender_hash=sender_hash,
            verb=verb,
            normalized_args=normalized_args,
            action_summary=action_summary,
            room_id=room_id,
            callback=callback,
            expires_at=time.monotonic() + self.ttl_s,
        )
        with self._lock:
            stale = self._by_sender.get(sender_hash)
            if stale:
                self._by_nonce.pop(stale, None)
            self._by_nonce[fresh.nonce] = fresh
            self._by_sender[sender_hash] = fresh.nonce
        return fresh.nonce

    def pop(self, nonce: str, sender_hash: str) -> Optional[PendingConfirmation]:
        """
        Atomically consume and return the pending entry for ``nonce``.

        Returns None when the nonce is unknown, owned by a different sender
        (entry left intact, nothing disclosed), or already expired (entry is
        evicted on the way out).
        """
        nonce = nonce.upper()
        with self._lock:
            entry = self._by_nonce.get(nonce)
            if entry is None or entry.sender_hash != sender_hash:
                # Unknown nonce or wrong sender — deny without detail.
                return None
            # Valid owner: consume both mappings, then check the deadline.
            self._by_nonce.pop(nonce, None)
            self._by_sender.pop(entry.sender_hash, None)
            if time.monotonic() > entry.expires_at:
                return None
            return entry

    def pending_nonce(self, sender_hash: str) -> Optional[str]:
        """
        Non-destructive peek at the sender's current pending nonce.

        Expired entries are lazily evicted and reported as absent.
        """
        with self._lock:
            current = self._by_sender.get(sender_hash)
            if current is None:
                return None
            entry = self._by_nonce.get(current)
            if entry is not None and time.monotonic() <= entry.expires_at:
                return current
            # Stale mapping — drop both sides.
            self._by_nonce.pop(current, None)
            self._by_sender.pop(sender_hash, None)
            return None

    def pending_count(self) -> int:
        """Number of non-expired pending entries (for /health, metrics)."""
        cutoff = time.monotonic()
        with self._lock:
            return len([e for e in self._by_nonce.values() if e.expires_at >= cutoff])

    def cleanup(self) -> int:
        """Eagerly remove every expired entry; return how many were removed."""
        cutoff = time.monotonic()
        removed = 0
        with self._lock:
            for stale in [n for n, e in self._by_nonce.items() if e.expires_at < cutoff]:
                victim = self._by_nonce.pop(stale)
                self._by_sender.pop(victim.sender_hash, None)
                removed += 1
        return removed
|
||||
@@ -23,18 +23,124 @@ Audit events emitted:
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, FrozenSet, List, Optional, Tuple
|
||||
from typing import Any, Dict, FrozenSet, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ── Constants ─────────────────────────────────────────────────────────────────
|
||||
|
||||
# Supported control verbs
VERB_RUNBOOK = "runbook"
VERB_STATUS = "status"
VERB_NODES = "nodes"      # M5.1: node policy overview
VERB_NODE = "node"        # M6.0: dynamic room-node override commands
VERB_ROOM = "room"        # M6.1: dynamic mixed room agent overrides
VERB_POLICY = "policy"    # M6.2: policy snapshot export/import
VERB_CONFIRM = "confirm"  # M9.0: two-step confirmation for dangerous commands
VERB_HELP = "help"

# NOTE(review): the extracted diff contained both the pre- and post-M9.0
# KNOWN_VERBS definitions; only the final (full) set is kept here.
KNOWN_VERBS: FrozenSet[str] = frozenset({
    VERB_RUNBOOK, VERB_STATUS, VERB_NODES, VERB_NODE,
    VERB_ROOM, VERB_POLICY, VERB_CONFIRM, VERB_HELP,
})

# ── M9.0: Dangerous command detection ─────────────────────────────────────────

def is_dangerous_cmd(cmd: "ControlCommand") -> bool:
    """
    Return True if the command requires two-step confirmation before applying.

    Dangerous commands:
      !node set room=... node=...          — changes room routing
      !room agents set room=... agents=... — replaces all agents for a room
      !policy import ...                   — overwrites policy DB (both modes)
      !policy prune_exports dry_run=0      — actual deletion (M10.0)
      !policy restore ...                  — always dangerous (M10.1)
    """
    v = cmd.verb
    sub = (cmd.subcommand or "").strip().lower()
    if v == VERB_NODE and sub == "set":
        return True
    if v == VERB_ROOM and sub == "agents" and cmd.args and cmd.args[0].lower() == "set":
        return True
    if v == VERB_POLICY and sub == "import":
        return True
    # M10.0: prune_exports is dangerous only when dry_run=0 (actual deletion).
    if v == VERB_POLICY and sub == "prune_exports":
        # Bug fix: compare case-insensitively so dry_run=False / dry_run=NO
        # is also recognised as a real (dangerous) deletion request.
        dry_raw = cmd.kwargs.get("dry_run", "1").strip().lower()
        return dry_raw in ("0", "false", "no")
    # M10.1: restore is always dangerous (no dry_run option).
    if v == VERB_POLICY and sub == "restore":
        return True
    return False


def build_normalized_args(cmd: "ControlCommand") -> str:
    """
    Build a human-readable normalized representation of the command args.
    Used in audit events and confirmation prompts.
    """
    # For `!room agents set`, the leading "set" positional is part of the
    # action, not an argument — drop it from the display.
    skip_first = cmd.verb == VERB_ROOM and cmd.subcommand == "agents"
    shown = list(cmd.args[1:] if skip_first else cmd.args)
    shown.extend(f"{k}={v}" for k, v in sorted(cmd.kwargs.items()))
    return " ".join(shown)
|
||||
|
||||
|
||||
def confirm_intent_reply(action_summary: str, nonce: str, ttl_s: int) -> str:
    """Prompt shown when a dangerous command is held pending confirmation (M9.0)."""
    lines = [
        "⚠️ **Confirm required**",
        f"Action: `{action_summary}`",
        f"Type `!confirm {nonce}` within {ttl_s}s to apply.",
        "_(Only you can confirm this action.)_",
    ]
    return "\n".join(lines)
|
||||
|
||||
|
||||
def confirm_success_reply(action_result: str) -> str:
    """Message sent once a confirmation is accepted and the action applied (M9.0)."""
    return "✅ Confirmed and applied.\n" + action_result
|
||||
|
||||
|
||||
def confirm_expired_reply() -> str:
    """Message for an invalid/expired nonce or a confirm from a different sender (M9.0)."""
    return (
        "❌ Invalid or expired confirmation code. The action was **not** applied.\n"
        "Re-issue the original command to get a new code."
    )
|
||||
|
||||
# M6.1: !room subcommand + actions
ROOM_SUBCMD_AGENTS = "agents"
ROOM_ACTION_SET = "set"
ROOM_ACTION_ADD = "add"
ROOM_ACTION_REMOVE = "remove"
ROOM_ACTION_GET = "get"
ROOM_ACTION_LIST = "list"
ROOM_ACTION_UNSET = "unset"  # remove full override
# Closed set used to validate the action token of `!room agents <action>`.
_VALID_ROOM_ACTIONS = frozenset({
    ROOM_ACTION_SET, ROOM_ACTION_ADD, ROOM_ACTION_REMOVE,
    ROOM_ACTION_GET, ROOM_ACTION_LIST, ROOM_ACTION_UNSET,
})

# M6.0: !node subcommands
NODE_SUBCMD_SET = "set"
NODE_SUBCMD_UNSET = "unset"
NODE_SUBCMD_GET = "get"
NODE_SUBCMD_LIST = "list"
# Closed set used to validate `!node <subcommand>`.
_VALID_NODE_SUBCMDS = frozenset({NODE_SUBCMD_SET, NODE_SUBCMD_UNSET, NODE_SUBCMD_GET, NODE_SUBCMD_LIST})

# Runbook subcommands (M3.x)
SUBCOMMAND_START = "start"  # M3.1 — implemented
SUBCOMMAND_NEXT = "next"  # M3.2 — implemented
SUBCOMMAND_COMPLETE = "complete"  # M3.2 — implemented
SUBCOMMAND_EVIDENCE = "evidence"  # M3.3 — implemented
SUBCOMMAND_STATUS = "status"  # M3.3 — implemented
SUBCOMMAND_POST_REVIEW = "post_review"  # M3.3 — implemented

# Max command line length to guard against garbage injection
_MAX_CMD_LEN = 512
|
||||
@@ -225,10 +331,814 @@ def check_authorization(
|
||||
# ── Reply helpers ─────────────────────────────────────────────────────────────
|
||||
|
||||
def not_implemented_reply(cmd: "ControlCommand") -> str:
    """Reply for known commands not yet implemented."""
    # NOTE(review): the extracted diff showed both the pre- and post-change
    # docstring/return lines; only the post-change wording is kept here.
    return (
        f"✅ Command acknowledged: `{cmd.raw}`\n"
        f"⏳ `!{cmd.verb} {cmd.subcommand}` — implementation pending."
    )
|
||||
|
||||
|
||||
def next_usage_reply() -> str:
    """Usage hint for `!runbook next` issued without a run_id."""
    return "\n".join((
        "⚠️ Usage: `!runbook next <run_id>`",
        "Example: `!runbook next abc-123`",
    ))
|
||||
|
||||
|
||||
def complete_usage_reply() -> str:
    """Usage hint for `!runbook complete` with missing required args."""
    return "\n".join((
        "⚠️ Usage: `!runbook complete <run_id> step=<n> status=ok|warn|fail [notes=...]`",
        "Example: `!runbook complete abc-123 step=3 status=ok notes=done`",
        "Notes with spaces: join without quotes — `notes=done_and_verified`.",
    ))
|
||||
|
||||
|
||||
def start_usage_reply() -> str:
    """Usage hint for `!runbook start` with a missing/invalid runbook_path."""
    return "\n".join((
        "⚠️ Usage: `!runbook start <runbook_path> [node=NODA1]`",
        "Example: `!runbook start runbooks/rehearsal-v1-checklist.md node=NODA1`",
        "runbook_path must be a relative path without `..`.",
    ))
|
||||
|
||||
|
||||
def runbook_started_reply(run_id: str, steps_total: int, status: str) -> str:
    """Success reply after sofiia-console creates a runbook run."""
    head = f"✅ runbook started: `run_id={run_id}` steps={steps_total} status={status}"
    return head + f"\nNext: `!runbook next {run_id}`"
|
||||
|
||||
|
||||
def runbook_start_error_reply(reason: str) -> str:
    """Error reply when sofiia-console returns a non-2xx or connection error."""
    return "❌ failed to start runbook: {}".format(reason)
|
||||
|
||||
|
||||
# ── M3.2 reply helpers ────────────────────────────────────────────────────────
|
||||
|
||||
# Max chars of instructions_md to include in Matrix message before truncating
_INSTRUCTIONS_EXCERPT_MAX = 1500


def next_manual_reply(
    run_id: str,
    step_index: int,
    steps_total: Optional[int],
    title: str,
    instructions_md: str,
) -> str:
    """Render the reply for a manual step returned by `!runbook next`."""
    label = f"Step {step_index + 1}"
    if steps_total:
        label = f"{label}/{steps_total}"

    body = instructions_md.strip()
    was_cut = len(body) > _INSTRUCTIONS_EXCERPT_MAX
    if was_cut:
        # Cut at the last full line inside the budget.
        body = body[:_INSTRUCTIONS_EXCERPT_MAX].rsplit("\n", 1)[0]

    out = [f"🧭 {label}: **{title}**", "", body]
    if was_cut:
        out.append("_...(truncated — open in console for full instructions)_")
    out.extend(["", f"Complete: `!runbook complete {run_id} step={step_index} status=ok`"])
    return "\n".join(out)
|
||||
|
||||
|
||||
def next_auto_reply(
    run_id: str,
    step_index: int,
    action_type: str,
    step_status: str,
    duration_ms: Optional[int],
    completed: bool,
) -> str:
    """Render the reply for an auto step (http_check/script) completed by `!runbook next`."""
    icon = {"ok": "✅", "warn": "⚠️", "fail": "❌"}.get(step_status, "ℹ️")
    timing = "" if duration_ms is None else f" duration={duration_ms}ms"
    head = f"{icon} step {step_index + 1} ({action_type}) {step_status}{timing}"

    if not completed:
        return f"{head}\nNext: `!runbook next {run_id}`"
    return (
        f"{head}\n"
        "🎉 All steps completed!\n"
        f"Get evidence: `!runbook evidence {run_id}`"
    )
|
||||
|
||||
|
||||
def next_error_reply(run_id: str, reason: str) -> str:
    """Error reply when `!runbook next` fails."""
    return "❌ failed to advance runbook: {}".format(reason)
|
||||
|
||||
|
||||
def complete_ok_reply(run_id: str, step_index: int, status: str, run_completed: bool) -> str:
    """Success reply after `!runbook complete`."""
    icons = {"ok": "✅", "warn": "⚠️", "fail": "❌", "skipped": "⏭️"}
    first = f"{icons.get(status, '✅')} recorded step {step_index + 1}: {status}"
    if run_completed:
        return f"{first}\n🎉 All steps completed!\nGet evidence: `!runbook evidence {run_id}`"
    return f"{first}\nNext: `!runbook next {run_id}`"
|
||||
|
||||
|
||||
def complete_error_reply(run_id: str, reason: str) -> str:
    """Error reply when `!runbook complete` fails."""
    return "❌ failed to complete step: {}".format(reason)
|
||||
|
||||
|
||||
# ── M3.3 reply helpers ────────────────────────────────────────────────────────
|
||||
|
||||
def status_usage_reply() -> str:
    """Usage hint for `!runbook status`."""
    return "\n".join((
        "⚠️ Usage: `!runbook status <run_id>`",
        "Example: `!runbook status abc-123`",
    ))
|
||||
|
||||
|
||||
def evidence_usage_reply() -> str:
    """Usage hint for `!runbook evidence`."""
    return "\n".join((
        "⚠️ Usage: `!runbook evidence <run_id>`",
        "Example: `!runbook evidence abc-123`",
    ))
|
||||
|
||||
|
||||
def post_review_usage_reply() -> str:
    """Usage hint for `!runbook post_review`."""
    return "\n".join((
        "⚠️ Usage: `!runbook post_review <run_id>`",
        "Example: `!runbook post_review abc-123`",
    ))
|
||||
|
||||
|
||||
def status_reply(run: dict) -> str:
    """Format the `!runbook status` summary from a get_run response dict."""
    rid = run.get("run_id", "?")
    state = run.get("status", "?")
    cur = run.get("current_step", 0)
    steps = run.get("steps", [])
    total = run.get("steps_total") or len(steps)
    ev = run.get("evidence_path")

    # Count warn/fail steps for the summary line.
    warn_n = sum(s.get("status") == "warn" for s in steps)
    fail_n = sum(s.get("status") == "fail" for s in steps)

    icon = {
        "running": "🔄", "completed": "✅", "aborted": "🛑", "paused": "⏸️",
    }.get(state, "ℹ️")
    label = f"{cur}/{total}" if total else str(cur)

    out = [
        f"{icon} `run_id={rid}` status={state} step={label}",
        f"runbook: `{run.get('runbook_path', '?')}` node: {run.get('node_id', '?')}",
    ]
    if warn_n or fail_n:
        out.append(f"warn={warn_n} fail={fail_n}")
    if ev:
        out.append(f"evidence: `{ev}`")

    # Suggest the next operator action once the run has completed.
    if state == "completed":
        if ev:
            out.append(f"Post-review: `!runbook post_review {rid}`")
        else:
            out.append(f"Get evidence: `!runbook evidence {rid}`")

    return "\n".join(out)
|
||||
|
||||
|
||||
def status_error_reply(run_id: str, reason: str) -> str:
    """Error reply when `!runbook status` fails."""
    return "❌ failed to get status: {}".format(reason)
|
||||
|
||||
|
||||
def evidence_reply(result: dict) -> str:
    """Success reply after `!runbook evidence`."""
    out = [
        f"📄 evidence created: `{result.get('evidence_path', '?')}` (bytes={result.get('bytes', 0)})"
    ]
    created = result.get("created_at", "")
    if created:
        out.append(f"created_at: {created}")
    rid = result.get("run_id", "")
    if rid:
        out.append(f"Next: `!runbook post_review {rid}`")
    return "\n".join(out)
|
||||
|
||||
|
||||
def evidence_error_reply(run_id: str, reason: str) -> str:
    """Error reply when `!runbook evidence` fails."""
    return "❌ failed to generate evidence: {}".format(reason)
|
||||
|
||||
|
||||
def post_review_reply(result: dict) -> str:
    """Success reply after `!runbook post_review`."""
    out = [f"🧾 post-review created: `{result.get('path', '?')}` (bytes={result.get('bytes', 0)})"]
    created = result.get("created_at", "")
    if created:
        out.append(f"created_at: {created}")
    return "\n".join(out)
|
||||
|
||||
|
||||
def post_review_error_reply(run_id: str, reason: str) -> str:
    """Error reply when `!runbook post_review` fails."""
    return "❌ failed to generate post-review: {}".format(reason)
|
||||
|
||||
|
||||
# ── M3.4 safety helpers ───────────────────────────────────────────────────────
|
||||
|
||||
#: Maximum length of notes/free-text operator input accepted before truncation.
MAX_NOTES_LEN: int = 500

#: Control characters (U+0000–U+001F minus tab/newline/CR) that must be stripped.
_CTRL_CHARS = "".join(chr(i) for i in range(32) if i not in (9, 10, 13))


def sanitize_notes(notes: str) -> str:
    """
    Strip control characters and truncate notes to MAX_NOTES_LEN.

    Safe to call with any string; returns empty string for falsy input.
    """
    if not notes:
        return ""
    scrubbed = notes.translate(str.maketrans("", "", _CTRL_CHARS))
    if len(scrubbed) <= MAX_NOTES_LEN:
        return scrubbed
    return scrubbed[:MAX_NOTES_LEN] + "…"
|
||||
|
||||
|
||||
def rate_limited_reply(scope: str, retry_after_s: float) -> str:
    """Reply when a control command is rejected by rate limiter or cooldown."""
    if retry_after_s >= 1:
        wait = f"{retry_after_s:.0f}s"
    else:
        wait = "a moment"
    return f"⏳ rate limited ({scope}), retry after {wait}"
|
||||
|
||||
|
||||
def status_not_available_reply() -> str:
    """Fallback when bridge status cannot be rendered yet."""
    return "⚠️ Bridge status not available (service initialising or config missing)."
|
||||
|
||||
|
||||
# M5.1: !nodes reply
_MAX_ROOM_OVERRIDES_SHOWN = 10


def nodes_reply(
    policy_info: dict,
    node_stats: Optional[dict] = None,
    sticky_info: Optional[dict] = None,
) -> str:
    """
    Compact reply for `!nodes` in control room.

    policy_info: from NodePolicy.as_info_dict()
    node_stats: optional dict {node_id: {"routed": N, "rejected": M, "health": ..., ...}}
    sticky_info: optional dict from StickyNodeCache (M8.1)
    """
    allowed_nodes = sorted(policy_info.get("allowed_nodes") or [])
    overrides = policy_info.get("room_overrides", {}) or {}
    allowed_str = ", ".join(f"`{n}`" for n in allowed_nodes)

    out = [
        "🌐 **Node policy**",
        f"Default: `{policy_info.get('default_node', '?')}` Allowed: {allowed_str}",
    ]

    if isinstance(overrides, dict) and overrides:
        out.append(f"\n**Room overrides** ({len(overrides)}):")
        shown = list(overrides.items())[:_MAX_ROOM_OVERRIDES_SHOWN]
        out.extend(f" `{rid}` → `{node}`" for rid, node in shown)
        hidden = len(overrides) - _MAX_ROOM_OVERRIDES_SHOWN
        if hidden > 0:
            out.append(f" _(+{hidden} more)_")
    elif isinstance(overrides, int):
        # as_info_dict may report room_overrides as an int count, not a dict.
        if overrides:
            out.append(f"\nRoom overrides: {overrides}")
        else:
            out.append("\nNo room overrides configured.")
    else:
        out.append("\nNo room overrides configured.")

    if node_stats:
        out.append("\n**Per-node stats** (since last restart):")
        for nid in sorted(node_stats):
            stats = node_stats[nid]
            bits = [f"routed={stats.get('routed', 0)}", f"rejected={stats.get('rejected', 0)}"]
            health = stats.get("health", "")
            if health:
                bits.append(f"health={health}")
            ewma = stats.get("ewma_latency_s")
            if ewma is not None:
                bits.append(f"ewma={ewma:.2f}s")
            consec = stats.get("consecutive_failures", 0)
            if consec:
                bits.append(f"consec_fail={consec}")
            out.append(f" `{nid}`: " + " ".join(bits))

    # M8.1: sticky cache section
    if sticky_info is not None:
        active = sticky_info.get("active_keys", 0)
        ttl = sticky_info.get("ttl_s", 0)
        if active:
            out.append(f"\n**Sticky routing** (anti-flap): {active} active ttl={ttl:.0f}s")
            for item in sticky_info.get("entries", []):
                rem = item.get("remaining_s", 0)
                out.append(f" `{item['key']}` → `{item['node']}` ({rem:.0f}s left)")
            if sticky_info.get("truncated"):
                out.append(f" _(+{sticky_info['truncated']} more)_")
        else:
            out.append(f"\nSticky routing: none active ttl={ttl:.0f}s")

    return "\n".join(out)
|
||||
|
||||
|
||||
# ── M6.0: !node subcommand parser + reply helpers ──────────────────────────────
|
||||
|
||||
import re as _re
|
||||
|
||||
_ROOM_KWARG_RE = _re.compile(r"\broom=(\S+)", _re.IGNORECASE)
|
||||
_NODE_VAL_RE = _re.compile(r"\bnode=(\w+)", _re.IGNORECASE)
|
||||
_ROOM_ID_RE = _re.compile(r"^![a-zA-Z0-9._\-]+:[a-zA-Z0-9._\-]+$")
|
||||
|
||||
|
||||
def parse_node_cmd(args_text: str) -> Tuple[str, Optional[str], Optional[str]]:
|
||||
"""
|
||||
Parse `!node <subcommand> [room=...] [node=...]` arguments.
|
||||
|
||||
Returns (subcmd, room_id_or_None, node_id_or_None).
|
||||
subcmd is lower-cased; node_id is upper-cased.
|
||||
"""
|
||||
parts = args_text.strip().split(None, 1)
|
||||
if not parts:
|
||||
return ("", None, None)
|
||||
subcmd = parts[0].lower()
|
||||
rest = parts[1] if len(parts) > 1 else ""
|
||||
|
||||
room_m = _ROOM_KWARG_RE.search(rest)
|
||||
node_m = _NODE_VAL_RE.search(rest)
|
||||
|
||||
room_id = room_m.group(1) if room_m else None
|
||||
node_id = node_m.group(1).upper() if node_m else None
|
||||
return (subcmd, room_id, node_id)
|
||||
|
||||
|
||||
def node_cmd_validate_room(room_id: str) -> bool:
    """Return True if room_id matches basic Matrix room ID format."""
    if not room_id:
        return False
    return _ROOM_ID_RE.match(room_id) is not None
|
||||
|
||||
|
||||
def node_cmd_reply_set(room_id: str, node_id: str) -> str:
    """Confirmation reply for a successful `!node set`."""
    return "✅ Override set: `" + room_id + "` → `" + node_id + "`"
|
||||
|
||||
|
||||
def node_cmd_reply_unset_ok(room_id: str) -> str:
    """Confirmation reply for a successful `!node unset`."""
    return "✅ Override removed for `" + room_id + "`"
|
||||
|
||||
|
||||
def node_cmd_reply_unset_not_found(room_id: str) -> str:
    """Reply when `!node unset` targets a room with no override."""
    return "ℹ️ No override was set for `" + room_id + "`"
|
||||
|
||||
|
||||
def node_cmd_reply_get(
    room_id: str,
    node_id: Optional[str],
    env_node: Optional[str],
    default_node: str,
) -> str:
    """Render the `!node get` reply: override, env map, default, effective node."""
    out: List[str] = [f"📌 **Node info for** `{room_id}`"]
    out.append(
        f"Dynamic override: `{node_id}` _(set by operator)_"
        if node_id
        else "Dynamic override: _none_"
    )
    if env_node:
        out.append(f"Env map: `{env_node}`")
    out.append(f"Default: `{default_node}`")
    # Precedence: dynamic override > env map > default.
    out.append(f"\nEffective node: **`{node_id or env_node or default_node}`**")
    return "\n".join(out)
|
||||
|
||||
|
||||
def node_cmd_reply_list(
    overrides: List[Tuple[str, str, int]],
    total: int,
) -> str:
    """
    Render the `!node list` reply.

    overrides: (room_id, node_id, updated_at_epoch_seconds) rows — one page,
        not necessarily all rows.
    total: total number of overrides in the store (used for the "+N more" tail).
    """
    import datetime

    lines = [f"📋 **Dynamic node overrides** ({total} total)"]
    if not overrides:
        lines.append("_None set._")
    else:
        for room_id, node_id, updated_at in overrides:
            # fromtimestamp(tz=UTC) replaces datetime.utcfromtimestamp(),
            # which is deprecated since Python 3.12 (same output for UTC).
            ts = datetime.datetime.fromtimestamp(
                updated_at, tz=datetime.timezone.utc
            ).strftime("%Y-%m-%d %H:%M")
            lines.append(f" `{room_id}` → `{node_id}` _(at {ts} UTC)_")
        if total > len(overrides):
            lines.append(f" _(+{total - len(overrides)} more)_")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def node_cmd_reply_error(msg: str) -> str:
    """Error reply for `!node` with a short usage summary."""
    usage = "\n".join(
        (
            "Usage:",
            " `!node set room=!room:server node=NODA2`",
            " `!node unset room=!room:server`",
            " `!node get room=!room:server`",
            " `!node list`",
        )
    )
    return f"❌ {msg}\n\n{usage}"
|
||||
|
||||
|
||||
# ── M6.1: !room agents reply helpers ──────────────────────────────────────────
|
||||
|
||||
_AGENTS_KWARG_RE = _re.compile(r"\bagents=(\S+)", _re.IGNORECASE)
|
||||
_AGENT_KWARG_RE = _re.compile(r"\bagent=(\w+)", _re.IGNORECASE)
|
||||
_DEFAULT_KWARG_RE = _re.compile(r"\bdefault=(\w+)", _re.IGNORECASE)
|
||||
|
||||
|
||||
def parse_room_agents_cmd(
    subcommand: str,
    args: tuple,
    kwargs: Dict[str, str],
) -> Tuple[str, Optional[str], Optional[List[str]], Optional[str], Optional[str]]:
    """
    Parse !room agents <action> [room=...] [agents=...] [agent=...] [default=...] args.

    Returns (action, room_id, agents_or_None, single_agent_or_None, default_agent_or_None).
    action: the ROOM_ACTION_* constant (from args[0] or subcommand)
    room_id: from kwargs["room"]
    agents: from kwargs["agents"] as a list (for set command)
    single_agent: from kwargs["agent"] (for add/remove)
    default_agent: from kwargs["default"]
    """
    # Prefer the explicit positional action; fall back to the subcommand itself.
    action = ""
    if args:
        action = args[0].lower().strip()
    if not action:
        action = subcommand.lower()

    room_id = kwargs.get("room")

    # agents= may be comma-separated; blanks between commas are dropped.
    agents: Optional[List[str]] = None
    raw = kwargs.get("agents", "")
    if raw:
        agents = [token.strip().lower() for token in raw.split(",") if token.strip()]

    single = kwargs.get("agent", "").strip().lower() or None
    default = kwargs.get("default", "").strip().lower() or None
    return action, room_id, agents, single, default
|
||||
|
||||
|
||||
def room_agents_reply_set(room_id: str, agents: List[str], default_agent: str) -> str:
    """Confirmation reply for `!room agents set`."""
    listed = ", ".join("`%s`" % name for name in sorted(agents))
    return (
        f"✅ Agent override set for `{room_id}`\n"
        f"Agents: {listed}\n"
        f"Default: `{default_agent}`"
    )
|
||||
|
||||
|
||||
def room_agents_reply_add(room_id: str, agent: str, agents: List[str], default_agent: Optional[str]) -> str:
    """Confirmation reply for `!room agents add`."""
    listed = ", ".join("`%s`" % name for name in sorted(agents))
    reply = f"✅ Agent `{agent}` added to `{room_id}`\nCurrent agents: {listed}"
    if default_agent:
        reply += f"\nDefault: `{default_agent}`"
    return reply
|
||||
|
||||
|
||||
def room_agents_reply_remove(room_id: str, agent: str, agents: List[str], default_agent: Optional[str]) -> str:
    """Confirmation reply for `!room agents remove` (override cleared when list empties)."""
    if not agents:
        return f"✅ Agent `{agent}` removed — no agents left, override cleared for `{room_id}`"
    listed = ", ".join("`%s`" % name for name in sorted(agents))
    reply = f"✅ Agent `{agent}` removed from `{room_id}`\nRemaining: {listed}"
    if default_agent:
        reply += f"\nDefault: `{default_agent}`"
    return reply
|
||||
|
||||
|
||||
def room_agents_reply_unset_ok(room_id: str) -> str:
    """Confirmation reply for a successful `!room agents unset`."""
    return "✅ Agent override cleared for `" + room_id + "` (using env/default config)"
|
||||
|
||||
|
||||
def room_agents_reply_unset_not_found(room_id: str) -> str:
    """Reply when `!room agents unset` targets a room with no override."""
    return "ℹ️ No agent override was set for `" + room_id + "`"
|
||||
|
||||
|
||||
def room_agents_reply_get(
    room_id: str,
    override_agents: Optional[List[str]],
    override_default: Optional[str],
    env_agents: Optional[List[str]],
    env_default: Optional[str],
) -> str:
    """Render `!room agents get`: dynamic override, env config, effective policy."""

    def backtick_list(names: List[str]) -> str:
        return ", ".join(f"`{n}`" for n in sorted(names))

    out = [f"📌 **Agent policy for** `{room_id}`"]
    if override_agents:
        out.append(
            f"Dynamic override: {backtick_list(override_agents)} default=`{override_default or '?'}`"
        )
    else:
        out.append("Dynamic override: _none_")
    if env_agents:
        out.append(f"Env config: {backtick_list(env_agents)} default=`{env_default or '?'}`")
    else:
        out.append("Env config: _not configured_")
    # Precedence: dynamic override wins over env config.
    eff_agents = override_agents or env_agents or []
    eff_default = override_default or env_default or "?"
    out.append(
        f"\nEffective agents: **{backtick_list(eff_agents)}** default=**`{eff_default}`**"
    )
    return "\n".join(out)
|
||||
|
||||
|
||||
def room_agents_reply_list(
    overrides: List[Tuple[str, List[str], Optional[str], int]],
    total: int,
) -> str:
    """
    Render the `!room agents list` reply.

    overrides: (room_id, agents, default_agent, updated_at_epoch_seconds)
        rows — one page, not necessarily all rows.
    total: total number of overrides in the store (used for the "+N more" tail).
    """
    import datetime

    lines = [f"📋 **Dynamic agent overrides** ({total} total)"]
    if not overrides:
        lines.append("_None set._")
    else:
        for room_id, agents, default_agent, updated_at in overrides:
            # fromtimestamp(tz=UTC) replaces datetime.utcfromtimestamp(),
            # which is deprecated since Python 3.12 (same output for UTC).
            ts = datetime.datetime.fromtimestamp(
                updated_at, tz=datetime.timezone.utc
            ).strftime("%Y-%m-%d %H:%M")
            agents_str = ", ".join(agents)
            lines.append(
                f" `{room_id}`: [{agents_str}] default=`{default_agent or '?'}` _(at {ts} UTC)_"
            )
        if total > len(overrides):
            lines.append(f" _(+{total - len(overrides)} more)_")
    return "\n".join(lines)
|
||||
|
||||
|
||||
def room_agents_reply_error(msg: str) -> str:
    """Error reply for `!room agents` with a short usage summary."""
    usage = "\n".join(
        (
            "Usage:",
            " `!room agents set room=!X agents=sofiia,helion [default=sofiia]`",
            " `!room agents add room=!X agent=druid`",
            " `!room agents remove room=!X agent=helion`",
            " `!room agents get room=!X`",
            " `!room agents unset room=!X`",
            " `!room agents list`",
        )
    )
    return f"❌ {msg}\n\n{usage}"
|
||||
|
||||
|
||||
# ── M6.2: !policy export/import reply helpers + path validator ────────────────
|
||||
|
||||
import os as _os
|
||||
import json as _json
|
||||
|
||||
|
||||
POLICY_EXPORTS_SUBDIR = "policy_exports"
|
||||
|
||||
|
||||
def validate_export_path(exports_dir: str, filename: str) -> Optional[str]:
|
||||
"""
|
||||
Validate and resolve an export filename to an absolute path.
|
||||
|
||||
Security: only allow simple filenames (no slashes, no `..`).
|
||||
Returns the absolute safe path, or None if invalid.
|
||||
"""
|
||||
if not filename:
|
||||
return None
|
||||
# Reject anything with directory separators or traversal sequences
|
||||
if "/" in filename or "\\" in filename or ".." in filename:
|
||||
return None
|
||||
# Only allow safe characters: alphanumeric, dash, underscore, dot
|
||||
if not _re.match(r"^[a-zA-Z0-9._\-]+$", filename):
|
||||
return None
|
||||
full_path = _os.path.join(exports_dir, filename)
|
||||
try:
|
||||
resolved = _os.path.realpath(full_path)
|
||||
exports_resolved = _os.path.realpath(exports_dir)
|
||||
if not resolved.startswith(exports_resolved + _os.sep):
|
||||
return None
|
||||
except Exception: # noqa: BLE001
|
||||
return None
|
||||
return full_path
|
||||
|
||||
|
||||
def policy_export_reply(path: str, node_count: int, agent_count: int) -> str:
    """
    Confirmation reply for `!policy export`.

    Bug fix: the reply previously printed the literal text `(unknown)` —
    `filename` was computed via basename(path) but never interpolated.
    """
    filename = _os.path.basename(path)
    return (
        f"✅ **Policy exported**\n"
        f"File: `{filename}`\n"
        f"Node overrides: {node_count} Agent overrides: {agent_count}"
    )
|
||||
|
||||
|
||||
def policy_import_dry_run_reply(stats: dict, mode: str) -> str:
    """Preview reply for `!policy import ... dry_run=1` (nothing applied)."""

    def triple(prefix: str) -> str:
        # "+added ~updated -deleted", defaulting missing keys to 0.
        return (
            f"+{stats.get(prefix + '_added', 0)}"
            f" ~{stats.get(prefix + '_updated', 0)}"
            f" -{stats.get(prefix + '_deleted', 0)}"
        )

    return (
        f"🔍 **Import dry-run** (mode=`{mode}`, no changes applied)\n"
        f"Node overrides: {triple('node')}\n"
        f"Agent overrides: {triple('agent')}\n"
        f"_Use `dry_run=0` to apply._"
    )
|
||||
|
||||
|
||||
def format_import_diff(diff: Any) -> str:
    """
    Format an ImportDiff as a human-readable Markdown string (M9.1).
    `diff` is an ImportDiff instance from policy_store.
    """

    def summarize(added: int, updated: int, deleted: int) -> str:
        # Only non-zero counters are shown; deletions carry a warning marker.
        parts: List[str] = []
        if added:
            parts.append(f"+{added} added")
        if updated:
            parts.append(f"~{updated} updated")
        if deleted:
            parts.append(f"-{deleted} deleted ⚠️")
        return ", ".join(parts) if parts else "no changes"

    out = [
        "**Node overrides:** "
        + summarize(diff.node_added, diff.node_updated, diff.node_deleted),
        "**Agent overrides:** "
        + summarize(diff.agent_added, diff.agent_updated, diff.agent_deleted),
    ]

    sample = getattr(diff, "sample_keys", None)
    if sample:
        shown = ", ".join(f"`{k}`" for k in sample)
        extra = diff.total_changes() - len(sample)
        tail = f" _(+{extra} more)_" if extra > 0 else ""
        out.append(f"**Affected rooms:** {shown}{tail}")

    if getattr(diff, "is_replace", False):
        out.append("⚠️ **REPLACE mode** — existing overrides NOT in the file will be **deleted**.")

    return "\n".join(out)
|
||||
|
||||
|
||||
def policy_import_intent_reply(
    diff: Any,
    action_summary: str,
    nonce: str,
    ttl_s: int,
) -> str:
    """Reply for !policy import intent with diff preview (M9.1)."""
    body = [
        "⚠️ **Confirm required**",
        f"Action: `{action_summary}`",
        "",
        "**Preview:**",
        format_import_diff(diff),
        "",
    ]
    if diff.total_changes() == 0:
        body.append("_(No policy changes would be made.)_")
        body.append("")
    body.append(f"Type `!confirm {nonce}` within {ttl_s}s to apply.")
    body.append("_(Only you can confirm. If the file changes, this confirm will be rejected.)_")
    return "\n".join(body)
|
||||
|
||||
|
||||
def policy_import_reply(stats: dict, mode: str) -> str:
    """Confirmation reply after a `!policy import` has been applied."""
    node_line = "Node overrides: +{0} ~{1} -{2}".format(
        stats.get("node_added", 0), stats.get("node_updated", 0), stats.get("node_deleted", 0)
    )
    agent_line = "Agent overrides: +{0} ~{1} -{2}".format(
        stats.get("agent_added", 0), stats.get("agent_updated", 0), stats.get("agent_deleted", 0)
    )
    return f"✅ **Policy imported** (mode=`{mode}`)\n{node_line}\n{agent_line}"
|
||||
|
||||
|
||||
def policy_restore_intent_reply(
    diff: Any,
    action_summary: str,
    nonce: str,
    ttl_s: int,
) -> str:
    """Reply for !policy restore intent — rollback preview + confirm prompt (M10.1)."""
    preview = format_import_diff(diff)
    return "\n".join(
        [
            "🔄 **Policy restore (rollback) preview**",
            preview,
            "",
            f"⚠️ **Rollback action:** `{action_summary}`",
            "",
            f"Type `!confirm {nonce}` to apply restore (expires in {ttl_s}s)",
        ]
    )
|
||||
|
||||
|
||||
def policy_restore_applied_reply(
    stats: Any,
    mode: str,
    autobackup_basename: str = "",
) -> str:
    """Reply after !policy restore is confirmed and applied (M10.1)."""

    def stat(key: str) -> int:
        # Non-dict stats (e.g. None) degrade gracefully to all-zero counters.
        return stats.get(key, 0) if isinstance(stats, dict) else 0

    reply = (
        f"✅ **Policy restored** (mode={mode})\n"
        f"Node overrides: +{stat('node_added')} ~{stat('node_updated')} -{stat('node_deleted')}\n"
        f"Agent overrides: +{stat('agent_added')} ~{stat('agent_updated')} -{stat('agent_deleted')}"
    )
    if autobackup_basename:
        reply += f"\n\n💾 Pre-restore backup saved: `{autobackup_basename}`"
    return reply
|
||||
|
||||
|
||||
def policy_history_reply(changes: List[Any]) -> str:
    """
    Format policy_changes records for !policy history reply (M10.2).

    Each line: #{n}. [id:NN] [YYYY-MM-DD HH:MM] verb/mode +Xn ~Yn -Zn `file` op:`hash8` [⚠️]
    Use !policy change id=NN to see full details.
    """
    if not changes:
        return "📋 **Policy change history**\nNo policy changes recorded yet."
    out = ["📋 **Policy change history** (most recent first)\n"]
    for idx, rec in enumerate(changes, 1):
        # Truncate long source filenames for a compact listing.
        name = rec.source_file
        if len(name) > 40:
            name = name[:40] + "…"
        flag = " ⚠️" if rec.is_destructive else ""
        out.append(
            f"{idx}. [id:{rec.id}] [{rec.when_str()}] `{rec.verb}/{rec.mode}`"
            f" {rec.changes_short()}{flag}"
            f" `{name}`"
            f" op:`{rec.sender_hash[:8]}`"
        )
    out.append("\nUse `!policy change id=<n>` for full details of a specific change.")
    return "\n".join(out)
|
||||
|
||||
|
||||
def policy_change_detail_reply(change: Any) -> str:
    """
    Format full details of a single PolicyChange for !policy change id=<n> (M10.3).
    """
    fname = change.source_file
    if len(fname) > 60:
        fname = fname[:60] + "…"
    return "\n".join(
        [
            f"🔍 **Policy change #{change.id}**\n",
            f"**Verb:** `{change.verb}`",
            f"**Mode:** `{change.mode}`",
            f"**Applied:** {change.when_str()} UTC",
            f"**Operator:** op:`{change.sender_hash[:8]}`",
            f"**File:** `{fname}`",
            f"**Destructive:** {'⚠️ Yes' if change.is_destructive else 'No'}",
            "",
            "**Changes:**",
            f" Nodes: +{change.node_added} added ~{change.node_updated} updated -{change.node_deleted} deleted",
            f" Agents: +{change.agent_added} added ~{change.agent_updated} updated -{change.agent_deleted} deleted",
            "",
            "**Summary:**",
            f" {change.diff_summary}",
        ]
    )
|
||||
|
||||
|
||||
def policy_prune_preview_reply(result: Any, retention_days: int) -> str:
    """Reply for !policy prune_exports dry_run=1 — preview of what would be pruned (M10.0)."""
    header = f"🗑️ **Policy exports prune preview** (retention={retention_days}d)\n"
    if result.count == 0:
        return header + "No files older than the retention window found. Nothing to prune."
    # List at most 5 sample filenames; summarize the rest as "+N more".
    shown = result.sample_filenames(5)
    listing = "\n".join(f" - `{name}`" for name in shown)
    hidden = result.count - len(shown)
    if hidden > 0:
        listing += f"\n _(+{hidden} more)_"
    return (
        header
        + f"Would delete **{result.count}** file(s) (~{result.total_bytes // 1024} KB):\n"
        + f"{listing}\n\n"
        + "To actually prune: `!policy prune_exports dry_run=0`"
    )
|
||||
|
||||
|
||||
def policy_prune_applied_reply(result: Any, retention_days: int) -> str:
    """Reply after !policy prune_exports dry_run=0 is confirmed and applied (M10.0)."""
    if result.count == 0:
        return (
            f"🗑️ **Policy exports pruned** (retention={retention_days}d)\n"
            "No files matched the retention window."
        )
    freed_kb = result.total_bytes // 1024
    return (
        f"✅ **Policy exports pruned** (retention={retention_days}d)\n"
        f"Deleted **{result.count}** file(s) (~{freed_kb} KB freed)."
    )
|
||||
|
||||
|
||||
def policy_cmd_error(msg: str) -> str:
    """Error reply for `!policy` with a short usage summary."""
    usage_lines = (
        "Usage:",
        " `!policy export`",
        " `!policy import path=policy-YYYYMMDD-HHMMSS.json [mode=merge|replace] [dry_run=0]`",
    )
    return f"❌ {msg}\n\n" + "\n".join(usage_lines)
|
||||
|
||||
|
||||
def help_reply() -> str:
    """Brief help text listing all control-channel commands.

    Bug fix: the rendered block contained both the old and new listing lines
    (diff artifact), so several `!runbook` commands and `!status` appeared
    twice — deduplicated, keeping the newer ✅-annotated entries.
    """
    return (
        "**DAGI Bridge — Control Commands**\n\n"
        "`!runbook start <path> [node=NODA1]` — Start a runbook run ✅\n"
        "`!runbook next <run_id>` — Advance to next step ✅\n"
        "`!runbook complete <run_id> step=<n> status=ok [notes=...]` — Mark step complete ✅\n"
        "`!runbook status <run_id>` — Show run status ✅\n"
        "`!runbook evidence <run_id>` — Generate release evidence ✅\n"
        "`!runbook post_review <run_id>` — Generate post-release review ✅\n"
        "`!status` — Bridge health summary ✅\n"
        "`!nodes` — Node policy overview ✅\n"
        "`!node set room=!room:server node=NODA2` — Set room-node override ✅\n"
        "`!node unset room=!room:server` — Remove room-node override ✅\n"
        "`!node get room=!room:server` — Show current override ✅\n"
        "`!node list` — List dynamic overrides (top 10) ✅\n"
        "`!room agents set room=!X agents=sofiia,helion [default=sofiia]` — Set agent list ✅\n"
        "`!room agents add room=!X agent=druid` — Add agent to room ✅\n"
        "`!room agents remove room=!X agent=helion` — Remove agent from room ✅\n"
        "`!room agents get room=!X` — Show current agent policy ✅\n"
        "`!room agents list` — List all rooms with agent overrides ✅\n"
        "`!room agents unset room=!X` — Remove all agent overrides for room ✅\n"
        "`!policy export` — Export policy snapshot to file ✅\n"
        "`!policy import path=<file> [mode=merge|replace] [dry_run=0]` — Import policy snapshot ✅\n"
        "`!help` — This message\n\n"
        "_Only authorised operators can issue control commands._"
    )
|
||||
|
||||
138
services/matrix-bridge-dagi/app/control_limiter.py
Normal file
138
services/matrix-bridge-dagi/app/control_limiter.py
Normal file
@@ -0,0 +1,138 @@
|
||||
"""
|
||||
control_limiter — M3.4: Rate limiting + cooldown for Matrix control channel.
|
||||
|
||||
Protection layers:
|
||||
1. Per-room sliding window — CONTROL_ROOM_RPM (default 60)
|
||||
2. Per-operator sliding window — CONTROL_OPERATOR_RPM (default 30)
|
||||
3. Per-run sliding window — CONTROL_RUN_NEXT_RPM (default 20, only !runbook next)
|
||||
4. Per-operator cooldown — CONTROL_COOLDOWN_S (default 2s, anti-double-click)
|
||||
|
||||
All state is in-memory (lost on restart), which is intentional — limits reset with the bridge.
|
||||
|
||||
Thread safety: not needed (asyncio single-threaded event loop).
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from collections import defaultdict, deque
|
||||
from typing import Dict, Tuple
|
||||
|
||||
|
||||
# Sentinel value for "unlimited" (rpm == 0 → skip check)
|
||||
_UNLIMITED = 0
|
||||
|
||||
|
||||
class ControlRateLimiter:
|
||||
"""
|
||||
Sliding-window rate limiter + cooldown for the Matrix control channel.
|
||||
|
||||
All rpm values are requests-per-minute over a 60-second rolling window.
|
||||
cooldown_s is a per-{operator, verb, subcommand} debounce window (anti-double-click).
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
room_rpm: int = 60,
|
||||
operator_rpm: int = 30,
|
||||
run_next_rpm: int = 20,
|
||||
cooldown_s: float = 2.0,
|
||||
) -> None:
|
||||
self.room_rpm = room_rpm
|
||||
self.operator_rpm = operator_rpm
|
||||
self.run_next_rpm = run_next_rpm
|
||||
self.cooldown_s = cooldown_s
|
||||
|
||||
# Sliding-window storage: key → deque[float] (monotonic timestamps)
|
||||
self._room_windows: Dict[str, deque] = defaultdict(deque)
|
||||
self._op_windows: Dict[str, deque] = defaultdict(deque)
|
||||
self._run_windows: Dict[str, deque] = defaultdict(deque)
|
||||
|
||||
# Cooldown: (sender_hash, verb, subcommand) → last accepted timestamp
|
||||
self._cooldown_times: Dict[str, float] = {}
|
||||
|
||||
# ── Sliding window helpers ─────────────────────────────────────────────────
|
||||
|
||||
@staticmethod
|
||||
def _check_window(
|
||||
windows: Dict[str, deque],
|
||||
key: str,
|
||||
rpm: int,
|
||||
) -> Tuple[bool, float]:
|
||||
"""
|
||||
Sliding-window check over a 60-second window.
|
||||
|
||||
Returns (allowed, retry_after_seconds).
|
||||
If rpm == 0, always allowed.
|
||||
"""
|
||||
if rpm == _UNLIMITED:
|
||||
return True, 0.0
|
||||
|
||||
now = time.monotonic()
|
||||
window = windows[key]
|
||||
cutoff = now - 60.0
|
||||
|
||||
# Evict expired entries
|
||||
while window and window[0] < cutoff:
|
||||
window.popleft()
|
||||
|
||||
if len(window) >= rpm:
|
||||
# Time until oldest entry expires
|
||||
retry_after = max(0.0, 60.0 - (now - window[0]))
|
||||
return False, retry_after
|
||||
|
||||
window.append(now)
|
||||
return True, 0.0
|
||||
|
||||
# ── Public check methods ───────────────────────────────────────────────────
|
||||
|
||||
def check_room(self, room_id: str) -> Tuple[bool, float]:
|
||||
"""Per-room rate limit check. Returns (allowed, retry_after_s)."""
|
||||
return self._check_window(self._room_windows, room_id, self.room_rpm)
|
||||
|
||||
def check_operator(self, sender_hash: str) -> Tuple[bool, float]:
|
||||
"""Per-operator rate limit check. Returns (allowed, retry_after_s)."""
|
||||
return self._check_window(self._op_windows, sender_hash, self.operator_rpm)
|
||||
|
||||
def check_run_next(self, run_id: str) -> Tuple[bool, float]:
|
||||
"""
|
||||
Per-run rate limit for !runbook next — prevents rapid-fire advancement.
|
||||
Returns (allowed, retry_after_s).
|
||||
"""
|
||||
return self._check_window(self._run_windows, run_id, self.run_next_rpm)
|
||||
|
||||
def check_cooldown(
|
||||
self,
|
||||
sender_hash: str,
|
||||
verb: str,
|
||||
subcommand: str,
|
||||
) -> Tuple[bool, float]:
|
||||
"""
|
||||
Anti-double-click cooldown per (operator, verb, subcommand).
|
||||
|
||||
Returns (allowed, wait_s). On first call → records timestamp and allows.
|
||||
On subsequent calls within cooldown_s → blocks and returns remaining wait.
|
||||
"""
|
||||
if self.cooldown_s <= 0:
|
||||
return True, 0.0
|
||||
|
||||
key = f"{sender_hash}:{verb}:{subcommand}"
|
||||
now = time.monotonic()
|
||||
last = self._cooldown_times.get(key)
|
||||
|
||||
if last is not None:
|
||||
elapsed = now - last
|
||||
if elapsed < self.cooldown_s:
|
||||
return False, self.cooldown_s - elapsed
|
||||
|
||||
self._cooldown_times[key] = now
|
||||
return True, 0.0
|
||||
|
||||
# ── Summary ───────────────────────────────────────────────────────────────
|
||||
|
||||
def as_health_dict(self) -> dict:
|
||||
return {
|
||||
"room_rpm": self.room_rpm,
|
||||
"operator_rpm": self.operator_rpm,
|
||||
"run_next_rpm": self.run_next_rpm,
|
||||
"cooldown_s": self.cooldown_s,
|
||||
}
|
||||
296
services/matrix-bridge-dagi/app/control_runner.py
Normal file
296
services/matrix-bridge-dagi/app/control_runner.py
Normal file
@@ -0,0 +1,296 @@
|
||||
"""
|
||||
control_runner — M3.1 + M3.2 + M3.3
|
||||
|
||||
Thin async HTTP client that calls the sofiia-console internal runbook API
|
||||
on behalf of the Matrix bridge control channel.
|
||||
|
||||
All functions are stateless; callers supply the pre-built AsyncClient.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Runbook path guards (fail-fast in the bridge, before calling the console)
|
||||
_MAX_PATH_LEN = 256
|
||||
_FORBIDDEN_SEGMENTS = {"..", "~"}
|
||||
|
||||
|
||||
class RunnerError(Exception):
|
||||
"""Raised when the sofiia-console returns an error or call fails."""
|
||||
|
||||
|
||||
def validate_runbook_path(path: str) -> Optional[str]:
|
||||
"""
|
||||
Return None if valid, or an error string describing the problem.
|
||||
Checks: non-empty, max length, no traversal segments, no absolute paths.
|
||||
"""
|
||||
path = path.strip()
|
||||
if not path:
|
||||
return "runbook_path is required"
|
||||
if len(path) > _MAX_PATH_LEN:
|
||||
return f"runbook_path too long (max {_MAX_PATH_LEN} chars)"
|
||||
if path.startswith("/"):
|
||||
return "absolute paths are not allowed"
|
||||
parts = path.replace("\\", "/").split("/")
|
||||
for part in parts:
|
||||
if part in _FORBIDDEN_SEGMENTS:
|
||||
return f"forbidden path segment: {part!r}"
|
||||
return None
|
||||
|
||||
|
||||
async def start_runbook_run(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    runbook_path: str,
    operator_id: str,
    node_id: str = "NODA1",
    timeout: float = 15.0,
) -> dict:
    """
    POST /api/runbooks/internal/runs → {run_id, status, current_step, steps_total}

    Raises RunnerError on connection error, non-200 response, or a body
    that is not valid JSON.
    """
    # rstrip('/') tolerates a console URL configured with a trailing slash.
    url = f"{console_url.rstrip('/')}/api/runbooks/internal/runs"
    payload: dict = {
        "runbook_path": runbook_path,
        "operator_id": operator_id,
        "node_id": node_id,
    }

    try:
        resp = await http_client.post(
            url,
            json=payload,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        # Transport-level failure (DNS, refused connection, timeout).
        raise RunnerError(f"connection error: {exc}") from exc

    if resp.status_code != 200:
        detail = _extract_error_detail(resp)
        raise RunnerError(f"HTTP {resp.status_code}: {detail}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
|
||||
|
||||
def _extract_error_detail(resp: httpx.Response) -> str:
|
||||
"""Extract a short error detail from an httpx response (safe: never raises)."""
|
||||
try:
|
||||
body = resp.json()
|
||||
if isinstance(body, dict) and body.get("detail"):
|
||||
return str(body["detail"])[:200]
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
return (resp.text or "")[:200]
|
||||
except Exception:
|
||||
return "<no detail>"
|
||||
|
||||
|
||||
async def get_runbook_run(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    run_id: str,
    timeout: float = 10.0,
) -> dict:
    """
    GET /api/runbooks/internal/runs/{run_id} → full run with steps.

    Raises RunnerError on connection error, 404 (unknown run), any other
    non-200 status, or a body that is not valid JSON.
    """
    # rstrip('/') tolerates a console URL configured with a trailing slash.
    url = f"{console_url.rstrip('/')}/api/runbooks/internal/runs/{run_id}"

    try:
        resp = await http_client.get(
            url,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        # Transport-level failure (DNS, refused connection, timeout).
        raise RunnerError(f"connection error: {exc}") from exc

    if resp.status_code == 404:
        # Distinguish "unknown run" for a friendlier operator message.
        raise RunnerError(f"run {run_id!r} not found")
    if resp.status_code != 200:
        raise RunnerError(f"HTTP {resp.status_code}: {_extract_error_detail(resp)}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
|
||||
|
||||
# ── M3.2 ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def next_runbook_step(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    run_id: str,
    operator_id: str = "",
    timeout: float = 30.0,
) -> dict:
    """
    POST /api/runbooks/internal/runs/{run_id}/next

    Returns either:
      {type:"manual", step_index, title, section, instructions_md, steps_total?}
      {type:"http_check"|"script", step_index, title, result, step_status, next_step, completed}

    Raises RunnerError on HTTP error, 404 (run not found / not active).
    """
    url = f"{console_url.rstrip('/')}/api/runbooks/internal/runs/{run_id}/next"
    # operator_id is omitted entirely (not sent empty) so the console can
    # apply its own default attribution.
    payload = {"operator_id": operator_id} if operator_id else {}

    try:
        resp = await http_client.post(
            url,
            json=payload,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        # Transport-level failure (DNS, refused connection, timeout).
        raise RunnerError(f"connection error: {exc}") from exc

    if resp.status_code == 404:
        detail = _extract_error_detail(resp)
        raise RunnerError(f"run not found or not active: {detail}")
    if resp.status_code != 200:
        raise RunnerError(f"HTTP {resp.status_code}: {_extract_error_detail(resp)}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
|
||||
|
||||
async def complete_runbook_step(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    run_id: str,
    step_index: int,
    status: str,
    notes: str = "",
    operator_id: str = "",
    timeout: float = 15.0,
) -> dict:
    """
    POST /api/runbooks/internal/runs/{run_id}/steps/{step_index}/complete

    Returns: {ok, run_id, step_index, status, next_step, steps_total, run_completed}

    Raises RunnerError on HTTP error or 404 (run/step not found or wrong current step).
    """
    url = (
        f"{console_url.rstrip('/')}/api/runbooks/internal/runs/{run_id}"
        f"/steps/{step_index}/complete"
    )
    # Optional fields are omitted (not sent as empty strings) so the console
    # applies its own defaults.
    payload: dict = {"status": status}
    if notes:
        payload["notes"] = notes
    if operator_id:
        payload["operator_id"] = operator_id

    try:
        resp = await http_client.post(
            url,
            json=payload,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        # Transport-level failure (DNS, refused connection, timeout).
        raise RunnerError(f"connection error: {exc}") from exc

    if resp.status_code == 404:
        detail = _extract_error_detail(resp)
        raise RunnerError(f"step not found or not current: {detail}")
    if resp.status_code != 200:
        raise RunnerError(f"HTTP {resp.status_code}: {_extract_error_detail(resp)}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
|
||||
|
||||
# ── M3.3 ──────────────────────────────────────────────────────────────────────
|
||||
|
||||
async def generate_evidence(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    run_id: str,
    timeout: float = 30.0,
) -> dict:
    """
    POST /api/runbooks/internal/runs/{run_id}/evidence

    Returns: {evidence_path, bytes, created_at, run_id}

    Raises RunnerError on HTTP error or 404 (run not found).
    """
    endpoint = f"{console_url.rstrip('/')}/api/runbooks/internal/runs/{run_id}/evidence"

    try:
        resp = await http_client.post(
            endpoint,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        raise RunnerError(f"connection error: {exc}") from exc

    code = resp.status_code
    if code == 404:
        raise RunnerError(f"run {run_id!r} not found")
    if code != 200:
        raise RunnerError(f"HTTP {code}: {_extract_error_detail(resp)}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
|
||||
|
||||
async def generate_post_review(
    http_client: httpx.AsyncClient,
    console_url: str,
    control_token: str,
    run_id: str,
    timeout: float = 30.0,
) -> dict:
    """
    POST /api/runbooks/internal/runs/{run_id}/post_review

    Returns: {path, bytes, created_at, run_id}

    Raises RunnerError on HTTP error or 404.
    """
    endpoint = f"{console_url.rstrip('/')}/api/runbooks/internal/runs/{run_id}/post_review"

    try:
        resp = await http_client.post(
            endpoint,
            headers={"X-Control-Token": control_token},
            timeout=timeout,
        )
    except httpx.RequestError as exc:
        raise RunnerError(f"connection error: {exc}") from exc

    code = resp.status_code
    if code == 404:
        raise RunnerError(f"run {run_id!r} not found")
    if code != 200:
        raise RunnerError(f"HTTP {code}: {_extract_error_detail(resp)}")

    try:
        return resp.json()
    except Exception as exc:
        raise RunnerError(f"invalid JSON response: {exc}") from exc
|
||||
210
services/matrix-bridge-dagi/app/discovery.py
Normal file
210
services/matrix-bridge-dagi/app/discovery.py
Normal file
@@ -0,0 +1,210 @@
|
||||
"""
|
||||
discovery — M4.0: Agent discovery helpers for Matrix user rooms.
|
||||
|
||||
Provides formatted replies for `!agents` and `!agents status` commands.
|
||||
These commands are available to all room members (no auth required) and
|
||||
are processed BEFORE routing to the LLM agent.
|
||||
|
||||
Supports:
|
||||
- Mixed rooms: list all agents, default, usage examples
|
||||
- Direct rooms: show single agent mapping
|
||||
- Unknown rooms: "no mapping" notice
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime
|
||||
from typing import Optional
|
||||
|
||||
from .mixed_routing import MixedRoomConfig
|
||||
from .room_mapping import RoomMappingConfig # noqa: F401 — used in type hints
|
||||
|
||||
|
||||
def _fmt_ts(ts: int) -> str:
|
||||
"""Format a Unix timestamp as compact UTC string."""
|
||||
try:
|
||||
return datetime.datetime.fromtimestamp(ts, tz=datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
|
||||
except Exception: # noqa: BLE001
|
||||
return str(ts)
|
||||
|
||||
# Prefix that triggers the discovery flow; matched case-insensitively by
# is_discovery_message(), either alone or followed by a sub-command.
DISCOVERY_CMD = "!agents"

# Hard cap on a single reply, in characters (Matrix message, not truncated —
# kept short by design); bridge_status_reply trims with an ellipsis beyond it.
_MAX_REPLY_LEN = 3500
|
||||
|
||||
|
||||
def is_discovery_message(text: str) -> bool:
    """Return True when *text* is the !agents command, alone or with arguments."""
    normalized = text.strip().lower()
    if normalized == DISCOVERY_CMD:
        return True
    # Require a space after the command so e.g. "!agentsx" does not match.
    return normalized.startswith(DISCOVERY_CMD + " ")
|
||||
|
||||
|
||||
def agents_reply(
    room_id: str,
    room_map: RoomMappingConfig,
    mixed_room_config: Optional[MixedRoomConfig],
) -> str:
    """
    Build a discovery reply for *room_id*.

    Resolution order:
      1. Mixed room  → agent roster, default agent, usage examples.
      2. Direct room → the single mapped agent.
      3. Otherwise   → 'no mapping' notice.
    """
    # Mixed rooms take precedence over direct mappings.
    if mixed_room_config and mixed_room_config.is_mixed(room_id):
        mixed = mixed_room_config.rooms.get(room_id)
        if mixed is not None:
            return _mixed_room_reply(room_id, mixed)

    direct_agent = room_map.agent_for_room(room_id)
    if direct_agent is None:
        return _unknown_room_reply()
    return _direct_room_reply(direct_agent)
|
||||
|
||||
|
||||
def _mixed_room_reply(room_id: str, room) -> str:
|
||||
"""Format reply for a mixed room."""
|
||||
agents = room.agents
|
||||
default = room.default_agent or (agents[0] if agents else "?")
|
||||
agent_list = ", ".join(f"**{a}**" for a in agents)
|
||||
|
||||
lines = [
|
||||
f"🤖 **Agents available in this room:** {agent_list}",
|
||||
f"⭐ **Default:** {default}",
|
||||
"",
|
||||
"**How to address an agent:**",
|
||||
]
|
||||
for agent in agents[:5]: # show max 5 examples
|
||||
lines.append(f" • `/{agent} <message>` — slash command")
|
||||
lines.append(f" • `@{agent} <message>` — mention")
|
||||
lines.append(f" • `{agent}: <message>` — colon prefix")
|
||||
lines.extend([
|
||||
"",
|
||||
f"_Messages without prefix go to **{default}** by default._",
|
||||
])
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def _direct_room_reply(agent_id: str) -> str:
|
||||
"""Format reply for a directly-mapped room (1 agent)."""
|
||||
return (
|
||||
f"🤖 This room is mapped to agent: **{agent_id}**\n\n"
|
||||
f"All messages are forwarded to **{agent_id}** automatically.\n"
|
||||
f"No prefix needed — just write your message."
|
||||
)
|
||||
|
||||
|
||||
def _unknown_room_reply() -> str:
|
||||
"""Format reply when room has no mapping."""
|
||||
return (
|
||||
"⚠️ This room has no agent mapping.\n\n"
|
||||
"Contact an operator to configure an agent for this room."
|
||||
)
|
||||
|
||||
|
||||
# ── Bridge status reply (M4.1) ────────────────────────────────────────────────
|
||||
|
||||
def bridge_status_reply(snapshot: dict) -> str:
    """
    Format a concise bridge health snapshot for `!status` in control room.

    snapshot keys (all optional with defaults):
        node_id, queue_size, queue_max, worker_count,
        room_count, mixed_room_count, operators_count,
        control_safety (dict), persistent_dedupe (dict),
        dedupe_hits, dedupe_inserts

    Additional optional sections (rendered only when present/non-empty):
        node_policy (dict), policy_store (dict),
        policy_last_export_at / policy_last_import_at / policy_db_mtime,
        nodes (per-node routed/rejected counters).

    Returns a Markdown-formatted string, trimmed to _MAX_REPLY_LEN with an
    ellipsis if the assembled reply would exceed it.
    """
    # Missing counters render as "?" rather than failing the whole reply.
    node_id = snapshot.get("node_id", "?")
    q_size = snapshot.get("queue_size", "?")
    q_max = snapshot.get("queue_max", "?")
    workers = snapshot.get("worker_count", "?")
    rooms = snapshot.get("room_count", 0)
    mixed = snapshot.get("mixed_room_count", 0)
    ops = snapshot.get("operators_count", 0)

    # `or {}` so an explicit None value is treated the same as a missing key.
    safety = snapshot.get("control_safety") or {}
    dedupe = snapshot.get("persistent_dedupe") or {}

    node_policy = snapshot.get("node_policy") or {}
    default_node = node_policy.get("default_node", node_id)
    allowed_nodes = node_policy.get("allowed_nodes") or []
    room_overrides = node_policy.get("room_overrides", 0)

    lines = [
        f"📡 **Bridge status** — node: `{node_id}`",
        "",
        f"**Queue:** {q_size}/{q_max} workers: {workers}",
        f"**Rooms:** {rooms} direct {mixed} mixed ops: {ops} operators",
        "",
    ]

    # M5.0: node policy — only shown when an allowlist is configured.
    if allowed_nodes:
        allowed_str = ", ".join(f"`{n}`" for n in sorted(allowed_nodes))
        lines.append(
            f"**Node policy:** default=`{default_node}` allowed={allowed_str} room_overrides={room_overrides}"
        )

    # Control safety (M3.4 rate limiting / cooldown summary)
    if safety:
        enabled = "✅" if safety.get("enabled") else "⬜"
        lines.append(
            f"**Control safety {enabled}:** "
            f"room={safety.get('room_rpm', '?')}rpm "
            f"op={safety.get('operator_rpm', '?')}rpm "
            f"cooldown={safety.get('cooldown_s', '?')}s"
        )

    # Persistent dedupe (M2.3 SQLite-backed store health)
    if dedupe:
        ok_emoji = "✅" if dedupe.get("ok") else "❌"
        pruned = dedupe.get("pruned_rows_last", 0)
        ttl = dedupe.get("ttl_h", "?")
        lines.append(
            f"**Dedupe {ok_emoji}:** ttl={ttl}h pruned_last={pruned} "
            f"db=`{dedupe.get('db_path') or 'n/a'}`"
        )

    # M6.0/M6.1: policy store status
    ps = snapshot.get("policy_store") or {}
    if ps:
        ps_ok = "✅" if ps.get("ok") else "❌"
        ps_node_count = ps.get("overrides_count", 0)
        # Agent override count may live inside the store dict or at snapshot top level.
        ps_agent_count = ps.get("agent_overrides_count", snapshot.get("policy_agent_overrides_count", 0))
        # Path key name varies by producer version; try both before "n/a".
        ps_path = ps.get("policy_store_path") or ps.get("path") or "n/a"
        lines.append(
            f"**Policy store {ps_ok}:** node_overrides={ps_node_count} "
            f"agent_overrides={ps_agent_count} db=`{ps_path}`"
        )

    # M6.2: last export/import timestamps + DB mtime (Unix seconds → UTC string)
    _last_export = snapshot.get("policy_last_export_at")
    _last_import = snapshot.get("policy_last_import_at")
    _db_mtime = snapshot.get("policy_db_mtime")
    _snap_parts: list = []
    if _last_export:
        _snap_parts.append(f"last_export=`{_fmt_ts(_last_export)}`")
    if _last_import:
        _snap_parts.append(f"last_import=`{_fmt_ts(_last_import)}`")
    if _db_mtime:
        _snap_parts.append(f"db_mtime=`{_fmt_ts(_db_mtime)}`")
    if _snap_parts:
        lines.append("**Policy snapshots:** " + " ".join(_snap_parts))

    # M5.1: per-node routed/rejected breakdown
    node_stats = snapshot.get("nodes") or {}
    if node_stats:
        # Leading "\n" yields a blank separator line after the final join.
        lines.append("\n**Per-node stats:**")
        for nid in sorted(node_stats):
            ns = node_stats[nid]
            lines.append(
                f" `{nid}`: routed={ns.get('routed', 0)} rejected={ns.get('rejected', 0)}"
            )

    reply = "\n".join(lines)
    # Keep replies within the Matrix-friendly cap; reserve room for the ellipsis.
    if len(reply) > _MAX_REPLY_LEN:
        reply = reply[:_MAX_REPLY_LEN - 3] + "…"
    return reply
|
||||
213
services/matrix-bridge-dagi/app/event_store.py
Normal file
213
services/matrix-bridge-dagi/app/event_store.py
Normal file
@@ -0,0 +1,213 @@
|
||||
"""
|
||||
event_store — M2.3: Persistent event deduplication via SQLite.
|
||||
|
||||
Stores processed Matrix event_ids so that bridge restarts do not reprocess
|
||||
events still returned by /sync (within TTL window).
|
||||
|
||||
Schema:
|
||||
processed_events (room_id, event_id, ts, sender_hash)
|
||||
PK: (room_id, event_id)
|
||||
IDX: idx_processed_events_ts (ts)
|
||||
|
||||
Design notes:
|
||||
- Uses aiosqlite for non-blocking async access from the ingress event loop.
|
||||
- Prune is best-effort: failures are logged but do NOT abort processing.
|
||||
- If the DB is unavailable (init error, corruption), EventStore degrades to
|
||||
a no-op: is_processed() returns False, mark_processed() is a no-op.
|
||||
The in-memory LRU dedupe (H1) continues to protect within a single run.
|
||||
- WAL mode is enabled for better concurrent read performance.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import Optional, Tuple
|
||||
|
||||
try:
|
||||
import aiosqlite
|
||||
_AIOSQLITE_OK = True
|
||||
except ImportError: # pragma: no cover
|
||||
aiosqlite = None # type: ignore
|
||||
_AIOSQLITE_OK = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_SCHEMA = """
|
||||
CREATE TABLE IF NOT EXISTS processed_events (
|
||||
room_id TEXT NOT NULL,
|
||||
event_id TEXT NOT NULL,
|
||||
ts INTEGER NOT NULL,
|
||||
sender_hash TEXT,
|
||||
PRIMARY KEY (room_id, event_id)
|
||||
);
|
||||
CREATE INDEX IF NOT EXISTS idx_processed_events_ts ON processed_events (ts);
|
||||
"""
|
||||
|
||||
|
||||
class EventStore:
    """
    Async SQLite-backed deduplication store for Matrix event_ids.

    Usage:
        store = EventStore("/app/data/matrix_bridge.db", ttl_h=48)
        await store.open()
        ...
        hit = await store.is_processed(room_id, event_id)
        if not hit:
            await store.mark_processed(room_id, event_id, sender_hash)
        ...
        pruned = await store.prune(batch=5000)
        await store.close()

    All operations are safe in "degraded" mode (open() failed or was never
    called): reads report "not processed", writes are no-ops returning False.
    """

    def __init__(
        self,
        db_path: str,
        ttl_h: int = 48,
        prune_batch: int = 5000,
    ) -> None:
        # Filesystem path of the SQLite database; parent dirs created in open().
        self.db_path = db_path
        # Retention window in hours — rows older than this are eligible for prune().
        self.ttl_h = ttl_h
        # Max rows deleted per prune() call, to avoid long write locks.
        self.prune_batch = prune_batch
        self._db: Optional["aiosqlite.Connection"] = None
        # False → degraded mode: every operation becomes a safe no-op.
        self._ok: bool = False
        self._last_prune_at: Optional[float] = None
        self._pruned_rows_last: int = 0

    # ── Lifecycle ─────────────────────────────────────────────────────────────

    async def open(self) -> bool:
        """
        Open the SQLite connection and apply schema.

        Returns True on success; False on failure (degraded mode).
        """
        if not _AIOSQLITE_OK:
            logger.warning("aiosqlite not available — persistent dedupe disabled")
            return False
        try:
            Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
            self._db = await aiosqlite.connect(self.db_path)
            # WAL mode: better concurrent read, non-blocking writes
            await self._db.execute("PRAGMA journal_mode=WAL")
            await self._db.execute("PRAGMA synchronous=NORMAL")
            await self._db.executescript(_SCHEMA)
            await self._db.commit()
            self._ok = True
            logger.info("EventStore opened: %s (ttl_h=%d)", self.db_path, self.ttl_h)
            return True
        except Exception as exc:
            logger.error("EventStore.open failed — degraded: %s", exc)
            self._ok = False
            return False

    async def close(self) -> None:
        """Close the SQLite connection gracefully; always leaves the store degraded."""
        if self._db is not None:
            try:
                await self._db.close()
            except Exception as exc:  # pragma: no cover
                logger.warning("EventStore.close error: %s", exc)
        self._db = None
        self._ok = False

    # ── Core operations ───────────────────────────────────────────────────────

    async def is_processed(self, room_id: str, event_id: str) -> bool:
        """
        Return True if (room_id, event_id) has already been processed.

        Safe to call even when degraded (returns False → no false deduplication).
        """
        if not self._ok or self._db is None:
            return False
        try:
            async with self._db.execute(
                "SELECT 1 FROM processed_events WHERE room_id=? AND event_id=? LIMIT 1",
                (room_id, event_id),
            ) as cursor:
                row = await cursor.fetchone()
                return row is not None
        except Exception as exc:
            # Read errors degrade to "not seen": the in-memory LRU still protects
            # within a single run, and reprocessing is safer than dropping events.
            logger.warning("EventStore.is_processed error (degraded): %s", exc)
            return False

    async def mark_processed(
        self,
        room_id: str,
        event_id: str,
        sender_hash: str = "",
    ) -> bool:
        """
        Insert (room_id, event_id) as processed.

        Returns True when a new row was inserted; False if the row already
        existed, the store is degraded, or the write failed.
        Uses INSERT OR IGNORE to avoid duplicates without raising.
        """
        if not self._ok or self._db is None:
            return False
        ts = int(time.time())
        try:
            cursor = await self._db.execute(
                "INSERT OR IGNORE INTO processed_events (room_id, event_id, ts, sender_hash) "
                "VALUES (?, ?, ?, ?)",
                (room_id, event_id, ts, sender_hash or None),
            )
            await self._db.commit()
            # rowcount == 0 means the IGNORE path fired (row already present);
            # honour the documented contract and report False in that case.
            return cursor.rowcount > 0
        except Exception as exc:
            logger.warning("EventStore.mark_processed error (degraded): %s", exc)
            return False

    # ── Prune ─────────────────────────────────────────────────────────────────

    async def prune(self, batch: Optional[int] = None) -> int:
        """
        Delete events older than ttl_h.

        Returns the number of rows deleted (0 on error or degraded).
        Uses LIMIT batch to avoid long locks on large tables.
        """
        if not self._ok or self._db is None:
            return 0

        cutoff = int(time.time()) - self.ttl_h * 3600
        effective_batch = batch or self.prune_batch
        deleted = 0

        try:
            # SQLite DELETE with LIMIT requires compiling with
            # SQLITE_ENABLE_UPDATE_DELETE_LIMIT, which may not be available.
            # Use a rowid subquery instead.
            cursor = await self._db.execute(
                "DELETE FROM processed_events "
                "WHERE rowid IN ("
                "  SELECT rowid FROM processed_events WHERE ts < ? LIMIT ?"
                ")",
                (cutoff, effective_batch),
            )
            await self._db.commit()
            # Use the DELETE cursor's own rowcount rather than "SELECT changes()":
            # changes() reflects the connection's *last* statement and could
            # observe a write interleaved by another coroutine on this connection.
            deleted = cursor.rowcount if cursor.rowcount > 0 else 0
            self._last_prune_at = time.time()
            self._pruned_rows_last = deleted
            if deleted:
                logger.info("EventStore pruned %d rows (cutoff=%d)", deleted, cutoff)
        except Exception as exc:
            logger.warning("EventStore.prune error: %s", exc)

        return deleted

    # ── Health / introspection ─────────────────────────────────────────────────

    def as_health_dict(self) -> dict:
        """Return a JSON-safe status dict for /health and !status reporting."""
        return {
            "enabled": self._ok,
            "db_path": self.db_path,
            "ttl_h": self.ttl_h,
            "ok": self._ok,
            "last_prune_at": self._last_prune_at,
            "pruned_rows_last": self._pruned_rows_last,
        }
|
||||
File diff suppressed because it is too large
Load Diff
@@ -33,6 +33,9 @@ except ImportError: # pragma: no cover
|
||||
|
||||
from .config import BridgeConfig, load_config
|
||||
from .control import ControlConfig, parse_control_config
|
||||
from .control_limiter import ControlRateLimiter
|
||||
from .event_store import EventStore
|
||||
from .node_policy import parse_node_policy
|
||||
from .ingress import MatrixIngressLoop
|
||||
from .mixed_routing import MixedRoomConfig, parse_mixed_room_map
|
||||
from .rate_limit import InMemoryRateLimiter
|
||||
@@ -69,7 +72,7 @@ if _PROM_OK:
|
||||
_invoke_latency = Histogram(
|
||||
"matrix_bridge_invoke_duration_seconds",
|
||||
"Latency of DAGI Router infer call",
|
||||
["agent_id"],
|
||||
["agent_id", "node_id"], # M5.1: per-node latency breakdown
|
||||
buckets=[0.5, 1.0, 2.0, 5.0, 10.0, 20.0, 45.0],
|
||||
)
|
||||
_send_latency = Histogram(
|
||||
@@ -80,7 +83,8 @@ if _PROM_OK:
|
||||
)
|
||||
_bridge_up = Gauge(
|
||||
"matrix_bridge_up",
|
||||
"1 if bridge started successfully",
|
||||
"1 if bridge started successfully; 0 on config error",
|
||||
["node_id"], # M7.1: per-node label for multi-node deployments
|
||||
)
|
||||
_rate_limiter_active_rooms = Gauge(
|
||||
"matrix_bridge_rate_limiter_active_rooms",
|
||||
@@ -106,10 +110,11 @@ if _PROM_OK:
|
||||
["agent_id"],
|
||||
buckets=[0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 30.0],
|
||||
)
|
||||
# M2.2: Mixed room routing metrics
|
||||
_routed_total = Counter(
|
||||
"matrix_bridge_routed_total",
|
||||
"Successful message routing by reason",
|
||||
# M2.2: Mixed room routing — reason breakdown (slash/mention/name/default/direct)
|
||||
# M7.1: Renamed from matrix_bridge_routed_total to avoid collision with M5.0 counter
|
||||
_routing_reasons_total = Counter(
|
||||
"matrix_bridge_routing_reasons_total",
|
||||
"Message routing breakdown by agent and routing reason (slash/mention/name/default/direct)",
|
||||
["agent_id", "reason"],
|
||||
)
|
||||
_route_rejected_total = Counter(
|
||||
@@ -127,20 +132,74 @@ if _PROM_OK:
|
||||
"Total control commands received from authorized operators",
|
||||
["sender", "verb", "subcommand"],
|
||||
)
|
||||
_control_rate_limited_total = Counter(
|
||||
"matrix_bridge_control_rate_limited_total",
|
||||
"Total control commands rejected by rate limiter or cooldown",
|
||||
["scope"],
|
||||
)
|
||||
_dedupe_persistent_hits_total = Counter(
|
||||
"matrix_bridge_dedupe_persistent_hits_total",
|
||||
"Total events dropped by persistent (SQLite) deduplication",
|
||||
["room_id"],
|
||||
)
|
||||
_dedupe_persistent_inserts_total = Counter(
|
||||
"matrix_bridge_dedupe_persistent_inserts_total",
|
||||
"Total events marked as processed in persistent dedupe store",
|
||||
)
|
||||
# M5.0: node-aware routing — primary routed counter (unique name, no collision)
|
||||
_routed_total = Counter(
|
||||
"matrix_bridge_routed_total",
|
||||
"Total messages successfully routed, by agent, resolved node, and node source",
|
||||
["agent_id", "node_id", "source"],
|
||||
)
|
||||
_node_rejected_total = Counter(
|
||||
"matrix_bridge_node_rejected_total",
|
||||
"Total messages with rejected (non-allowlisted) node kwarg",
|
||||
["node_id"],
|
||||
)
|
||||
# M8.0: soft-failover metrics
|
||||
_failover_total = Counter(
|
||||
"matrix_bridge_failover_total",
|
||||
"Total successful soft-failovers by node transition and reason",
|
||||
["from_node", "to_node", "reason"],
|
||||
)
|
||||
_node_health_state = Gauge(
|
||||
"matrix_bridge_node_health_state",
|
||||
"Node health state: 1=healthy 0.5=degraded 0=down",
|
||||
["node_id"],
|
||||
)
|
||||
# M8.1: sticky routing metrics
|
||||
_sticky_set_total = Counter(
|
||||
"matrix_bridge_sticky_node_total",
|
||||
"Total sticky routing entries set after failover, by preferred node and scope",
|
||||
["node_id", "scope"],
|
||||
)
|
||||
_sticky_active = Gauge(
|
||||
"matrix_bridge_sticky_node_active",
|
||||
"Current count of active sticky routing entries",
|
||||
[],
|
||||
)
|
||||
|
||||
# ── Startup state ─────────────────────────────────────────────────────────────
|
||||
_START_TIME = time.monotonic()
|
||||
_cfg: Optional[BridgeConfig] = None
|
||||
# M5.1: in-memory per-node counters (lightweight, for !status reply)
|
||||
from collections import defaultdict as _defaultdict
|
||||
_node_stats: Dict[str, Dict[str, int]] = _defaultdict(lambda: {"routed": 0, "rejected": 0})
|
||||
_config_error: Optional[str] = None
|
||||
_matrix_reachable: Optional[bool] = None
|
||||
_gateway_reachable: Optional[bool] = None
|
||||
_room_map: Optional[RoomMappingConfig] = None
|
||||
_mixed_room_config: Optional[MixedRoomConfig] = None
|
||||
_control_config: Optional[ControlConfig] = None
|
||||
_event_store: Optional[EventStore] = None
|
||||
_rate_limiter: Optional[InMemoryRateLimiter] = None
|
||||
_ingress_loop: Optional["MatrixIngressLoop"] = None # for /health queue_size
|
||||
_ingress_task: Optional[asyncio.Task] = None
|
||||
_ingress_stop: Optional[asyncio.Event] = None
|
||||
_sticky_cache: Optional[Any] = None # M8.1: StickyNodeCache instance
|
||||
_confirm_store: Optional[Any] = None # M9.0: ConfirmStore instance
|
||||
_dummy_http_client: Optional[Any] = None # M11: soak inject endpoint (debug only)
|
||||
|
||||
|
||||
async def _probe_url(url: str, timeout: float = 5.0) -> bool:
|
||||
@@ -230,7 +289,7 @@ async def lifespan(app_: Any):
|
||||
else:
|
||||
logger.warning("⚠️ DAGI Gateway NOT reachable: %s", _cfg.dagi_gateway_url)
|
||||
if _PROM_OK:
|
||||
_bridge_up.set(1)
|
||||
_bridge_up.labels(node_id=_cfg.node_id or "").set(1) # M7.1: labeled
|
||||
|
||||
# Start ingress loop (fire-and-forget asyncio task)
|
||||
_has_rooms = (_room_map and _room_map.total_mappings > 0) or (
|
||||
@@ -263,9 +322,9 @@ async def lifespan(app_: Any):
|
||||
_rate_limiter_active_rooms.set(stats["active_rooms"])
|
||||
_rate_limiter_active_senders.set(stats["active_senders"])
|
||||
|
||||
def _on_invoke_latency(agent_id: str, duration_s: float) -> None:
|
||||
def _on_invoke_latency(agent_id: str, duration_s: float, node_id: str = "") -> None:
|
||||
if _PROM_OK:
|
||||
_invoke_latency.labels(agent_id=agent_id).observe(duration_s)
|
||||
_invoke_latency.labels(agent_id=agent_id, node_id=node_id or "unknown").observe(duration_s)
|
||||
|
||||
def _on_send_latency(agent_id: str, duration_s: float) -> None:
|
||||
if _PROM_OK:
|
||||
@@ -287,7 +346,7 @@ async def lifespan(app_: Any):
|
||||
# M2.2 callbacks
|
||||
def _on_routed(agent_id: str, reason: str) -> None:
|
||||
if _PROM_OK:
|
||||
_routed_total.labels(agent_id=agent_id, reason=reason).inc()
|
||||
_routing_reasons_total.labels(agent_id=agent_id, reason=reason).inc() # M7.1: renamed
|
||||
|
||||
def _on_route_rejected(room_id: str, reason: str) -> None:
|
||||
if _PROM_OK:
|
||||
@@ -300,6 +359,164 @@ async def lifespan(app_: Any):
|
||||
sender=sender, verb=verb, subcommand=subcommand
|
||||
).inc()
|
||||
|
||||
# M3.4: control safety rate limiter
|
||||
_control_limiter = ControlRateLimiter(
|
||||
room_rpm=_cfg.control_room_rpm,
|
||||
operator_rpm=_cfg.control_operator_rpm,
|
||||
run_next_rpm=_cfg.control_run_next_rpm,
|
||||
cooldown_s=_cfg.control_cooldown_s,
|
||||
) if _control_config and _control_config.is_enabled else None
|
||||
|
||||
def _on_control_rate_limited(scope: str) -> None:
|
||||
if _PROM_OK:
|
||||
_control_rate_limited_total.labels(scope=scope).inc()
|
||||
|
||||
# M2.3: Persistent event deduplication
|
||||
_prune_task: Optional[asyncio.Task] = None
|
||||
if _cfg.persistent_dedupe:
|
||||
import os
|
||||
db_path = os.path.join(_cfg.bridge_data_dir, "matrix_bridge.db")
|
||||
_event_store = EventStore(
|
||||
db_path=db_path,
|
||||
ttl_h=_cfg.processed_events_ttl_h,
|
||||
prune_batch=_cfg.processed_events_prune_batch,
|
||||
)
|
||||
store_ok = await _event_store.open()
|
||||
if store_ok:
|
||||
logger.info(
|
||||
"✅ Persistent dedupe: %s (ttl_h=%d)",
|
||||
db_path, _cfg.processed_events_ttl_h,
|
||||
)
|
||||
# Best-effort prune on startup
|
||||
pruned = await _event_store.prune()
|
||||
if pruned:
|
||||
logger.info("Startup prune removed %d stale events", pruned)
|
||||
# Periodic prune task
|
||||
if _cfg.processed_events_prune_interval_s > 0:
|
||||
async def _prune_loop() -> None:
|
||||
while True:
|
||||
await asyncio.sleep(_cfg.processed_events_prune_interval_s)
|
||||
if _event_store:
|
||||
await _event_store.prune()
|
||||
_prune_task = asyncio.create_task(_prune_loop(), name="event_store_prune")
|
||||
else:
|
||||
logger.warning("⚠️ EventStore init failed — persistent dedupe disabled (degraded)")
|
||||
_event_store = None
|
||||
else:
|
||||
logger.info("Persistent dedupe disabled (PERSISTENT_DEDUPE=0)")
|
||||
|
||||
def _on_dedupe_hit(room_id: str, agent_id: str) -> None:
|
||||
if _PROM_OK:
|
||||
_dedupe_persistent_hits_total.labels(room_id=room_id).inc()
|
||||
|
||||
def _on_dedupe_insert() -> None:
|
||||
if _PROM_OK:
|
||||
_dedupe_persistent_inserts_total.inc()
|
||||
|
||||
# M5.0: node-aware routing policy
|
||||
_node_policy = parse_node_policy(
|
||||
raw_allowed=_cfg.bridge_allowed_nodes,
|
||||
default_node=_cfg.bridge_default_node,
|
||||
raw_room_map=_cfg.bridge_room_node_map,
|
||||
)
|
||||
logger.info(
|
||||
"✅ Node policy: default=%s allowed=%s room_overrides=%d",
|
||||
_node_policy.default_node,
|
||||
sorted(_node_policy.allowed_nodes),
|
||||
len(_node_policy.room_node_map),
|
||||
)
|
||||
|
||||
# M6.0: Persistent policy store for dynamic room-node overrides
|
||||
_policy_store: Optional[Any] = None
|
||||
try:
|
||||
from .policy_store import PolicyStore as _PolicyStore
|
||||
import os
|
||||
_ps_path = os.path.join(_cfg.bridge_data_dir, "policy_overrides.db")
|
||||
_policy_store = _PolicyStore(db_path=_ps_path)
|
||||
_policy_store.open()
|
||||
logger.info(
|
||||
"✅ Policy store: %s (%d overrides)",
|
||||
_ps_path, _policy_store.count_overrides(),
|
||||
)
|
||||
except Exception as _ps_exc:
|
||||
logger.warning("Policy store init failed (non-fatal): %s", _ps_exc)
|
||||
_policy_store = None
|
||||
|
||||
def _on_node_selected(agent_id: str, node_id: str, source: str) -> None:
|
||||
if _PROM_OK:
|
||||
_routed_total.labels(agent_id=agent_id, node_id=node_id, source=source).inc()
|
||||
_node_stats[node_id]["routed"] += 1
|
||||
|
||||
def _on_node_rejected(rejected_node: str) -> None:
|
||||
if _PROM_OK:
|
||||
_node_rejected_total.labels(node_id=rejected_node).inc()
|
||||
_node_stats[rejected_node]["rejected"] += 1
|
||||
|
||||
# M8.0: Node health tracker + soft-failover
|
||||
from .node_health import NodeHealthTracker as _NodeHealthTracker, parse_node_health_config as _parse_nhc
|
||||
_health_cfg = _parse_nhc(
|
||||
fail_consecutive=_cfg.node_fail_consecutive,
|
||||
lat_ewma_s=_cfg.node_lat_ewma_s,
|
||||
ewma_alpha=_cfg.node_ewma_alpha,
|
||||
)
|
||||
_node_health_tracker = _NodeHealthTracker(_health_cfg)
|
||||
logger.info(
|
||||
"✅ Node health tracker: fail_consecutive=%d lat_ewma_s=%.1f ewma_alpha=%.2f",
|
||||
_cfg.node_fail_consecutive, _cfg.node_lat_ewma_s, _cfg.node_ewma_alpha,
|
||||
)
|
||||
|
||||
def _on_failover(from_node: str, to_node: str, reason: str) -> None:
|
||||
if _PROM_OK:
|
||||
_failover_total.labels(
|
||||
from_node=from_node, to_node=to_node, reason=reason
|
||||
).inc()
|
||||
if _PROM_OK:
|
||||
_update_health_gauges()
|
||||
logger.info("⚡ Failover: %s → %s reason=%s", from_node, to_node, reason)
|
||||
|
||||
def _update_health_gauges() -> None:
|
||||
if not _PROM_OK or _node_health_tracker is None or _node_policy is None:
|
||||
return
|
||||
_STATE_MAP = {"healthy": 1.0, "degraded": 0.5, "down": 0.0}
|
||||
for nid in _node_policy.allowed_nodes:
|
||||
state = _node_health_tracker.state(nid)
|
||||
_node_health_state.labels(node_id=nid).set(_STATE_MAP.get(state, 1.0))
|
||||
|
||||
# M8.1: Sticky failover cache
|
||||
from .sticky_cache import StickyNodeCache as _StickyNodeCache
|
||||
global _sticky_cache
|
||||
if _cfg.failover_sticky_ttl_s > 0:
|
||||
_sticky_cache = _StickyNodeCache(ttl_s=_cfg.failover_sticky_ttl_s)
|
||||
logger.info("✅ Sticky failover cache: ttl=%.0fs", _cfg.failover_sticky_ttl_s)
|
||||
else:
|
||||
_sticky_cache = None
|
||||
logger.info("ℹ️ Sticky failover disabled (FAILOVER_STICKY_TTL_S=0)")
|
||||
|
||||
# M9.0: Confirm store
|
||||
from .confirm_store import ConfirmStore as _ConfirmStore
|
||||
global _confirm_store
|
||||
if _cfg.confirm_ttl_s > 0:
|
||||
_confirm_store = _ConfirmStore(ttl_s=_cfg.confirm_ttl_s)
|
||||
logger.info("✅ Confirm store: ttl=%.0fs", _cfg.confirm_ttl_s)
|
||||
else:
|
||||
_confirm_store = None
|
||||
logger.info("ℹ️ Confirm store disabled (CONFIRM_TTL_S=0)")
|
||||
|
||||
# M11: debug inject client (only created when inject is enabled)
|
||||
global _dummy_http_client
|
||||
if _cfg.debug_inject_enabled and _HTTPX_OK:
|
||||
_dummy_http_client = _httpx.AsyncClient(timeout=30.0)
|
||||
logger.warning(
|
||||
"⚠️ DEBUG_INJECT_ENABLED=true — synthetic event injection active. "
|
||||
"NEVER use in production!"
|
||||
)
|
||||
|
||||
def _on_sticky_set(node_id: str, scope: str) -> None:
|
||||
if _PROM_OK:
|
||||
_sticky_set_total.labels(node_id=node_id, scope=scope).inc()
|
||||
if _sticky_cache is not None:
|
||||
_sticky_active.labels().set(_sticky_cache.active_count())
|
||||
|
||||
ingress = MatrixIngressLoop(
|
||||
matrix_homeserver_url=_cfg.matrix_homeserver_url,
|
||||
matrix_access_token=_cfg.matrix_access_token,
|
||||
@@ -330,7 +547,38 @@ async def lifespan(app_: Any):
|
||||
on_route_rejected=_on_route_rejected,
|
||||
control_config=_control_config,
|
||||
control_unauthorized_behavior=_cfg.control_unauthorized_behavior,
|
||||
sofiia_control_token=_cfg.sofiia_control_token,
|
||||
control_limiter=_control_limiter,
|
||||
on_control_command=_on_control_command,
|
||||
on_control_rate_limited=_on_control_rate_limited,
|
||||
event_store=_event_store,
|
||||
on_dedupe_persistent_hit=_on_dedupe_hit,
|
||||
on_dedupe_persistent_insert=_on_dedupe_insert,
|
||||
# M4.0: agent discovery
|
||||
discovery_rpm=_cfg.discovery_rpm,
|
||||
# M5.0: node-aware routing
|
||||
node_policy=_node_policy,
|
||||
on_node_selected=_on_node_selected,
|
||||
on_node_rejected=_on_node_rejected,
|
||||
# M5.1: node stats getter for !status
|
||||
node_stats_getter=lambda: {k: dict(v) for k, v in _node_stats.items()},
|
||||
# M6.0: dynamic room-node policy store
|
||||
policy_store=_policy_store,
|
||||
# M6.2: data directory for policy exports/imports
|
||||
bridge_data_dir=_cfg.bridge_data_dir,
|
||||
# M8.0: node health tracker + failover callback
|
||||
node_health_tracker=_node_health_tracker,
|
||||
on_failover=_on_failover,
|
||||
# M8.1: sticky failover cache
|
||||
sticky_cache=_sticky_cache,
|
||||
on_sticky_set=_on_sticky_set,
|
||||
# M8.2: HA persistence config
|
||||
ha_health_snapshot_interval_s=_cfg.ha_health_snapshot_interval_s,
|
||||
ha_health_max_age_s=_cfg.ha_health_max_age_s,
|
||||
# M9.0: Two-step confirmation store
|
||||
confirm_store=_confirm_store,
|
||||
policy_export_retention_days=_cfg.policy_export_retention_days,
|
||||
policy_history_limit=_cfg.policy_history_limit,
|
||||
)
|
||||
logger.info(
|
||||
"✅ Backpressure queue: max=%d workers=%d drain_timeout=%.1fs",
|
||||
@@ -349,7 +597,8 @@ async def lifespan(app_: Any):
|
||||
_config_error = str(exc)
|
||||
logger.error("❌ Config error: %s", _config_error)
|
||||
if _PROM_OK:
|
||||
_bridge_up.set(0)
|
||||
_cfg_node = _cfg.node_id if _cfg else ""
|
||||
_bridge_up.labels(node_id=_cfg_node or "").set(0) # M7.1: labeled
|
||||
yield
|
||||
# Shutdown: cancel ingress loop
|
||||
if _ingress_stop:
|
||||
@@ -360,6 +609,23 @@ async def lifespan(app_: Any):
|
||||
await asyncio.wait_for(_ingress_task, timeout=5.0)
|
||||
except (asyncio.CancelledError, asyncio.TimeoutError):
|
||||
pass
|
||||
# Shutdown: cancel prune task + close EventStore
|
||||
if "_prune_task" in dir() and _prune_task and not _prune_task.done(): # type: ignore[name-defined]
|
||||
_prune_task.cancel() # type: ignore[name-defined]
|
||||
if _event_store is not None:
|
||||
await _event_store.close()
|
||||
# M6.0: close policy store
|
||||
if "_policy_store" in dir() and _policy_store is not None: # type: ignore[name-defined]
|
||||
try:
|
||||
_policy_store.close() # type: ignore[name-defined]
|
||||
except Exception: # noqa: BLE001
|
||||
pass
|
||||
# M11: close debug http client if open
|
||||
if _dummy_http_client is not None:
|
||||
try:
|
||||
await _dummy_http_client.aclose()
|
||||
except Exception: # noqa: BLE001
|
||||
pass
|
||||
logger.info("matrix-bridge-dagi shutting down")
|
||||
|
||||
# ── App ───────────────────────────────────────────────────────────────────────
|
||||
@@ -435,6 +701,89 @@ async def health() -> Dict[str, Any]:
|
||||
"operators_count": len(_control_config.operator_allowlist) if _control_config else 0,
|
||||
"unauthorized_behavior": _cfg.control_unauthorized_behavior,
|
||||
},
|
||||
"control_safety": {
|
||||
"enabled": _cfg.control_room_rpm > 0 or _cfg.control_operator_rpm > 0,
|
||||
"room_rpm": _cfg.control_room_rpm,
|
||||
"operator_rpm": _cfg.control_operator_rpm,
|
||||
"run_next_rpm": _cfg.control_run_next_rpm,
|
||||
"cooldown_s": _cfg.control_cooldown_s,
|
||||
},
|
||||
"persistent_dedupe": _event_store.as_health_dict() if _event_store else {
|
||||
"enabled": False,
|
||||
"db_path": None,
|
||||
"ttl_h": _cfg.processed_events_ttl_h,
|
||||
"ok": False,
|
||||
"last_prune_at": None,
|
||||
"pruned_rows_last": 0,
|
||||
},
|
||||
# M6.0: policy store health
|
||||
"policy_store": _health_policy_store_dict(),
|
||||
# M8.1: sticky failover cache health
|
||||
"sticky_cache": _health_sticky_dict(),
|
||||
# M8.2: HA state persistence info
|
||||
"ha_state": _health_ha_dict(),
|
||||
# M9.0: confirm store
|
||||
"confirm_store": _health_confirm_dict(),
|
||||
}
|
||||
|
||||
|
||||
def _health_confirm_dict() -> Dict[str, Any]:
    """Build the confirm-store section of the /health payload (M9.0)."""
    store = _confirm_store
    if store is None:
        return {"enabled": False}
    return {
        "enabled": True,
        "pending": store.pending_count(),
        "ttl_s": store.ttl_s,
    }
|
||||
|
||||
|
||||
def _health_ha_dict() -> Dict[str, Any]:
    """Build the HA state-persistence section of the /health payload (M8.2)."""
    unavailable = {"sticky_loaded": 0, "health_loaded": False, "snapshot_interval_s": 0}
    if _ingress_loop is None:
        return unavailable
    try:
        status = _ingress_loop.get_status()
        return {
            "sticky_loaded": status.get("ha_sticky_loaded", 0),
            "health_loaded": status.get("ha_health_loaded", False),
            "snapshot_interval_s": status.get("ha_health_snapshot_interval_s", 0),
        }
    except Exception:  # noqa: BLE001
        # get_status() is best-effort; a failure must never break /health.
        return unavailable
|
||||
|
||||
|
||||
def _health_sticky_dict() -> Dict[str, Any]:
    """Build the sticky-failover-cache section of the /health payload (M8.1)."""
    cache = _sticky_cache
    if cache is None:
        return {"enabled": False, "active_keys": 0, "ttl_s": 0}
    return {
        "enabled": True,
        "active_keys": cache.active_count(),
        "ttl_s": cache.ttl_s,
    }
|
||||
|
||||
|
||||
def _health_policy_store_dict() -> Dict[str, Any]:
    """Build the policy-store section of the /health payload (M6.0–M6.2).

    Reads the snapshot exposed by the ingress loop; any failure (or no loop
    yet) degrades to an all-defaults "not ok" dict rather than raising.
    """
    try:
        if _ingress_loop is not None:
            status = _ingress_loop.get_status()
            return {
                "ok": status.get("policy_store_ok", False),
                "path": status.get("policy_store_path"),
                "overrides_count": status.get("policy_overrides_count", 0),
                "agent_overrides_count": status.get("policy_agent_overrides_count", 0),  # M6.1
                "last_export_at": status.get("policy_last_export_at"),  # M6.2
                "last_import_at": status.get("policy_last_import_at"),  # M6.2
                "db_mtime": status.get("policy_db_mtime"),  # M6.2
            }
    except Exception:  # noqa: BLE001
        pass
    return {
        "ok": False,
        "path": None,
        "overrides_count": 0,
        "agent_overrides_count": 0,
        "last_export_at": None,
        "last_import_at": None,
        "db_mtime": None,
    }
|
||||
|
||||
|
||||
@@ -464,6 +813,101 @@ async def bridge_mappings() -> Dict[str, Any]:
|
||||
}
|
||||
|
||||
|
||||
# ── Debug / Soak (M11) ────────────────────────────────────────────────────────
@app.post("/v1/debug/inject_event")
async def debug_inject_event(body: Dict[str, Any]) -> Dict[str, Any]:
    """
    Synthetic event injection for soak/load testing.

    Enabled ONLY when DEBUG_INJECT_ENABLED=true (never in production).

    Body: { "room_id": "!room:server", "event": { Matrix event dict } }
    The event is enqueued directly into the ingress loop, bypassing Matrix poll.

    Returns: { "ok": bool, "enqueued": bool, "room_id": str, "event_id": str }
    """
    # Hard gate: 403 unless explicitly enabled via config.
    if _cfg is None or not _cfg.debug_inject_enabled:
        return Response(  # type: ignore[return-value]
            '{"ok":false,"error":"debug inject disabled"}',
            status_code=403,
            media_type="application/json",
        )
    if _ingress_loop is None:
        return {"ok": False, "error": "ingress loop not running"}

    room_id = body.get("room_id", "")
    event = body.get("event", {})
    if not room_id or not event:
        return {"ok": False, "error": "missing room_id or event"}

    # Ensure event has minimum required fields for ingress processing.
    if not event.get("event_id"):
        # BUG FIX: the previous time.monotonic()-based id can repeat after a
        # process restart (monotonic clocks reset to an arbitrary origin),
        # while the M2.3 persistent dedupe store survives restarts — colliding
        # ids would silently drop injected events. uuid4 is unique regardless.
        import uuid as _uuid
        event["event_id"] = f"!inject-{_uuid.uuid4().hex}"
    if not event.get("type"):
        event["type"] = "m.room.message"
    if not event.get("content"):
        event["content"] = {"msgtype": "m.text", "body": event.get("body", "soak-ping")}

    # We bypass Matrix polling by directly calling _try_enqueue on the right
    # mapping, using a stub client so replies are discarded.
    enqueued = False
    try:
        # Find the matching room mapping (direct rooms only for soak).
        mapping = None
        if _ingress_loop._room_map is not None:
            for m in _ingress_loop._room_map.mappings:
                if m.room_id == room_id:
                    mapping = m
                    break

        if mapping is None:
            return {"ok": False, "error": f"no mapping for room_id={room_id!r}"}

        # Build a minimal stub Matrix client — replies are discarded for soak events.
        from .matrix_client import MatrixClient

        class _SoakMatrixClient(MatrixClient):  # type: ignore[misc]
            """No-op Matrix client for synthetic soak events."""

            def __init__(self) -> None:  # noqa: D107
                pass  # skip real __init__ (no network setup needed)

            async def mark_seen(self, room_id: str, event_id: str) -> None:  # type: ignore[override]
                pass

            async def send_text(self, room_id: str, text: str,  # type: ignore[override]
                                txn: Optional[str] = None) -> None:
                pass

        _stub_client = _SoakMatrixClient()

        if _dummy_http_client is None:
            return {"ok": False, "error": "debug http client not initialised"}

        await _ingress_loop._try_enqueue(
            _stub_client,  # type: ignore[arg-type]
            _ingress_loop._queue,
            _dummy_http_client,
            event,
            mapping,
        )
        enqueued = True
    except Exception as exc:  # noqa: BLE001
        return {"ok": False, "error": str(exc), "enqueued": False}

    return {
        "ok": True,
        "enqueued": enqueued,
        "room_id": room_id,
        "event_id": event.get("event_id"),
    }
|
||||
|
||||
|
||||
async def _noop_send(room_id: str, text: str, txn: Optional[str] = None) -> None:
|
||||
"""Discard replies from injected soak events."""
|
||||
|
||||
|
||||
# ── Metrics ───────────────────────────────────────────────────────────────────
|
||||
@app.get("/metrics")
|
||||
async def metrics():
|
||||
|
||||
224
services/matrix-bridge-dagi/app/metrics_contract.py
Normal file
224
services/matrix-bridge-dagi/app/metrics_contract.py
Normal file
@@ -0,0 +1,224 @@
|
||||
"""
|
||||
Metrics Contract — Matrix Bridge DAGI
|
||||
Phase M7.1
|
||||
|
||||
Single source of truth for all Prometheus metric names and their label sets.
|
||||
Used by:
|
||||
- main.py (registers metrics against this contract)
|
||||
- tests/test_matrix_bridge_m71_metrics_contract.py (static validation)
|
||||
- ops/prometheus/alerts/matrix-bridge-dagi.rules.yml (PromQL expressions)
|
||||
- ops/grafana/dashboards/matrix-bridge-dagi.json (panel queries)
|
||||
|
||||
Format:
|
||||
METRICS_CONTRACT: Dict[metric_name, MetricSpec]
|
||||
|
||||
MetricSpec fields:
|
||||
kind : "counter" | "histogram" | "gauge"
|
||||
labels : list of label names (empty list = no labels)
|
||||
help : one-line description
|
||||
phase : originating milestone
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, List
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class MetricSpec:
    """Declarative spec for a single Prometheus metric in the M7.1 contract.

    Frozen so the contract is immutable once defined; compared/validated by
    the metrics-contract test, alert rules, and dashboard queries.
    """
    kind: str  # "counter" | "histogram" | "gauge"
    labels: List[str]  # label names; empty = no labels
    help: str  # one-line human-readable description (shadows builtin help(); kept for Prometheus naming)
    phase: str = "M1"  # originating milestone for traceability
|
||||
|
||||
|
||||
# ── Contract ──────────────────────────────────────────────────────────────────
# One entry per exported metric, grouped by subsystem. Each line is a single
# MetricSpec so diffs against alert rules and dashboards stay reviewable.

METRICS_CONTRACT: Dict[str, MetricSpec] = {
    # ── Core message traffic ──────────────────────────────────────────────────
    "matrix_bridge_messages_received_total": MetricSpec(kind="counter", labels=["room_id", "agent_id"], help="Total Matrix messages received", phase="M1"),
    "matrix_bridge_messages_replied_total": MetricSpec(kind="counter", labels=["room_id", "agent_id", "status"], help="Total agent replies sent to Matrix (status=ok|error)", phase="M1"),
    "matrix_bridge_rate_limited_total": MetricSpec(kind="counter", labels=["room_id", "agent_id", "limit_type"], help="Messages dropped by rate limiter", phase="H1"),
    "matrix_bridge_gateway_errors_total": MetricSpec(kind="counter", labels=["error_type"], help="Bridge errors by stage: sync_error, network_error, http_<status>, matrix_send_error, unexpected", phase="M1"),

    # ── Latency histograms ────────────────────────────────────────────────────
    "matrix_bridge_invoke_duration_seconds": MetricSpec(kind="histogram", labels=["agent_id", "node_id"], help="Latency of DAGI Router infer call, per agent and node", phase="H3"),
    "matrix_bridge_send_duration_seconds": MetricSpec(kind="histogram", labels=["agent_id"], help="Latency of Matrix send_text call", phase="H3"),
    "matrix_bridge_queue_wait_seconds": MetricSpec(kind="histogram", labels=["agent_id"], help="Time between enqueue and worker start processing", phase="H3"),

    # ── Queue ─────────────────────────────────────────────────────────────────
    "matrix_bridge_queue_size": MetricSpec(kind="gauge", labels=[], help="Current number of pending items in the work queue", phase="H2"),
    "matrix_bridge_queue_dropped_total": MetricSpec(kind="counter", labels=["room_id", "agent_id"], help="Messages dropped because queue was full", phase="H2"),

    # ── Rate limiter gauges ───────────────────────────────────────────────────
    "matrix_bridge_rate_limiter_active_rooms": MetricSpec(kind="gauge", labels=[], help="Rooms with activity in the current rate-limit window", phase="H1"),
    "matrix_bridge_rate_limiter_active_senders": MetricSpec(kind="gauge", labels=[], help="Senders with activity in the current rate-limit window", phase="H1"),

    # ── Routing ───────────────────────────────────────────────────────────────
    "matrix_bridge_routing_reasons_total": MetricSpec(kind="counter", labels=["agent_id", "reason"], help="Message routing breakdown by agent and routing reason (slash/mention/name/default/direct)", phase="M2.2"),
    "matrix_bridge_route_rejected_total": MetricSpec(kind="counter", labels=["room_id", "reason"], help="Messages rejected during routing (unknown agent, bad slash, no mapping, etc.)", phase="M2.2"),
    "matrix_bridge_active_room_agent_locks": MetricSpec(kind="gauge", labels=[], help="Number of room-agent pairs currently holding a concurrency lock", phase="M2.2"),

    # ── Control channel ───────────────────────────────────────────────────────
    "matrix_bridge_control_commands_total": MetricSpec(kind="counter", labels=["sender", "verb", "subcommand"], help="Total control commands received from authorized operators", phase="M3.0"),
    "matrix_bridge_control_rate_limited_total": MetricSpec(kind="counter", labels=["scope"], help="Total control commands rejected by rate limiter or cooldown", phase="M3.4"),

    # ── Persistent deduplication ─────────────────────────────────────────────
    "matrix_bridge_dedupe_persistent_hits_total": MetricSpec(kind="counter", labels=["room_id"], help="Total events dropped by persistent (SQLite) deduplication", phase="M2.3"),
    "matrix_bridge_dedupe_persistent_inserts_total": MetricSpec(kind="counter", labels=[], help="Total events marked as processed in persistent dedupe store", phase="M2.3"),

    # ── Node-aware routing (M5.0) ─────────────────────────────────────────────
    "matrix_bridge_routed_total": MetricSpec(kind="counter", labels=["agent_id", "node_id", "source"], help="Total messages successfully routed, by agent, resolved node, and node source", phase="M5.0"),
    "matrix_bridge_node_rejected_total": MetricSpec(kind="counter", labels=["node_id"], help="Total messages with rejected (non-allowlisted) node kwarg", phase="M5.0"),

    # ── Bridge health (M7.1) ──────────────────────────────────────────────────
    "matrix_bridge_up": MetricSpec(kind="gauge", labels=["node_id"], help="1 if bridge started successfully; 0 on config error", phase="M7.1"),

    # ── Soft-failover (M8.0) ─────────────────────────────────────────────────
    "matrix_bridge_failover_total": MetricSpec(kind="counter", labels=["from_node", "to_node", "reason"], help="Total successful soft-failovers by node transition and reason", phase="M8.0"),
    "matrix_bridge_node_health_state": MetricSpec(kind="gauge", labels=["node_id"], help="Node health state gauge: 1=healthy 0.5=degraded 0=down", phase="M8.0"),

    # ── Sticky routing anti-flap (M8.1) ──────────────────────────────────────
    "matrix_bridge_sticky_node_total": MetricSpec(kind="counter", labels=["node_id", "scope"], help="Total sticky routing entries set after failover, by preferred node and scope", phase="M8.1"),
    "matrix_bridge_sticky_node_active": MetricSpec(kind="gauge", labels=[], help="Current count of active sticky routing entries", phase="M8.1"),
}
|
||||
|
||||
# ── Alert metric references ────────────────────────────────────────────────────
# Metric base-names referenced by the Prometheus alert rules. The contract
# test asserts that every one of these is also a key in METRICS_CONTRACT.
ALERT_METRIC_REFS = frozenset((
    "matrix_bridge_up",
    "matrix_bridge_gateway_errors_total",
    "matrix_bridge_messages_replied_total",
    "matrix_bridge_queue_dropped_total",
    "matrix_bridge_rate_limited_total",
    "matrix_bridge_control_rate_limited_total",
    "matrix_bridge_dedupe_persistent_hits_total",
    "matrix_bridge_invoke_duration_seconds",
))
|
||||
@@ -309,3 +309,25 @@ def reply_prefix(agent_id: str, is_mixed: bool) -> str:
|
||||
return ""
|
||||
# Capitalise first letter of agent name: "sofiia" → "Sofiia"
|
||||
return f"{agent_id.capitalize()}: "
|
||||
|
||||
|
||||
def build_override_config(
    base_config: MixedRoomConfig,
    room_id: str,
    agents: List[str],
    default_agent: str,
) -> MixedRoomConfig:
    """
    M6.1: Build a temporary MixedRoomConfig that uses a dynamic store override
    for room_id while keeping all other rooms from base_config unchanged.

    Used in _enqueue_from_mixed_room to inject PolicyStore agent overrides
    without mutating the shared base configuration.
    """
    # Shallow-copy the room table, then replace just the overridden entry.
    patched = {
        **base_config.rooms,
        room_id: MixedRoom(
            room_id=room_id,
            agents=agents,
            default_agent=default_agent,
        ),
    }
    return MixedRoomConfig(rooms=patched)
|
||||
|
||||
262
services/matrix-bridge-dagi/app/node_health.py
Normal file
262
services/matrix-bridge-dagi/app/node_health.py
Normal file
@@ -0,0 +1,262 @@
|
||||
"""
|
||||
NodeHealthTracker — M8.0: per-node health state tracking for soft-failover.
|
||||
|
||||
Tracks invoke outcomes per node and maintains:
|
||||
- EWMA latency estimate
|
||||
- consecutive failure counter
|
||||
- last ok / last error timestamps
|
||||
- derived health state: "healthy" | "degraded" | "down"
|
||||
|
||||
State transitions
|
||||
-----------------
|
||||
Any state → "down" : consecutive_failures >= fail_consecutive
|
||||
Any state → "degraded" : ewma_latency_s >= lat_ewma_threshold
|
||||
(and not yet "down")
|
||||
"down"/"degraded" → "healthy" : record_ok() resets consecutive_failures to 0
|
||||
and ewma is updated towards the actual latency
|
||||
|
||||
Thread safety
|
||||
-------------
|
||||
All mutations are protected by a threading.Lock so this can be called from
|
||||
asyncio callbacks (e.g. in `_invoke_and_send` on the event loop thread).
|
||||
Use `record_ok` / `record_error` from within coroutines; they are synchronous
|
||||
(no blocking I/O) so they are safe to call directly without to_thread.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, FrozenSet, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ── State constants ────────────────────────────────────────────────────────────

# Derived node health states, as returned by NodeHealthTracker.state().
NODE_STATE_HEALTHY = "healthy"
NODE_STATE_DEGRADED = "degraded"
NODE_STATE_DOWN = "down"

# Failover-triggering error classes (reason values passed to record_error()).
FAILOVER_REASON_TIMEOUT = "timeout"
FAILOVER_REASON_HTTP_5XX = "http_5xx"
FAILOVER_REASON_NETWORK = "network"
|
||||
|
||||
|
||||
# ── Config ────────────────────────────────────────────────────────────────────
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class NodeHealthConfig:
|
||||
"""
|
||||
Thresholds controlling when a node is considered degraded or down.
|
||||
|
||||
fail_consecutive : int number of consecutive failures → "down"
|
||||
lat_ewma_s : float EWMA latency estimate (seconds) threshold → "degraded"
|
||||
ewma_alpha : float EWMA smoothing factor (0..1); higher = more reactive
|
||||
"""
|
||||
fail_consecutive: int = 3
|
||||
lat_ewma_s: float = 12.0
|
||||
ewma_alpha: float = 0.3
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if not (0 < self.ewma_alpha <= 1):
|
||||
raise ValueError(f"ewma_alpha must be in (0, 1], got {self.ewma_alpha}")
|
||||
if self.fail_consecutive < 1:
|
||||
raise ValueError(f"fail_consecutive must be ≥ 1, got {self.fail_consecutive}")
|
||||
if self.lat_ewma_s <= 0:
|
||||
raise ValueError(f"lat_ewma_s must be > 0, got {self.lat_ewma_s}")
|
||||
|
||||
|
||||
# ── Per-node state ────────────────────────────────────────────────────────────
|
||||
|
||||
@dataclass
|
||||
class _NodeState:
|
||||
invoke_ok_total: int = 0
|
||||
invoke_err_total: int = 0
|
||||
consecutive_failures: int = 0
|
||||
last_ok_ts: Optional[float] = None
|
||||
last_err_ts: Optional[float] = None
|
||||
ewma_latency_s: Optional[float] = None # None until first ok record
|
||||
|
||||
|
||||
# ── Tracker ───────────────────────────────────────────────────────────────────
|
||||
|
||||
class NodeHealthTracker:
|
||||
"""
|
||||
Thread-safe per-node health tracker.
|
||||
|
||||
Usage:
|
||||
tracker = NodeHealthTracker(NodeHealthConfig())
|
||||
|
||||
# On successful invoke
|
||||
tracker.record_ok("NODA1", latency_s=1.4)
|
||||
|
||||
# On failed invoke
|
||||
tracker.record_error("NODA1", reason=FAILOVER_REASON_TIMEOUT)
|
||||
|
||||
# Read health state
|
||||
state = tracker.state("NODA1") # "healthy" | "degraded" | "down"
|
||||
fallback = tracker.pick_fallback("NODA1", allowed_nodes=frozenset({"NODA1","NODA2"}))
|
||||
"""
|
||||
|
||||
def __init__(self, config: Optional[NodeHealthConfig] = None) -> None:
|
||||
self._cfg = config or NodeHealthConfig()
|
||||
self._nodes: Dict[str, _NodeState] = {}
|
||||
self._lock = threading.RLock() # RLock: re-entrant (needed for all_info → as_info_dict)
|
||||
|
||||
# ── Public mutation API ────────────────────────────────────────────────────
|
||||
|
||||
def record_ok(self, node_id: str, latency_s: float) -> None:
|
||||
"""Record a successful invoke for node_id with given latency."""
|
||||
with self._lock:
|
||||
ns = self._get_or_create(node_id)
|
||||
ns.invoke_ok_total += 1
|
||||
ns.consecutive_failures = 0
|
||||
ns.last_ok_ts = time.monotonic()
|
||||
if ns.ewma_latency_s is None:
|
||||
ns.ewma_latency_s = latency_s
|
||||
else:
|
||||
alpha = self._cfg.ewma_alpha
|
||||
ns.ewma_latency_s = alpha * latency_s + (1 - alpha) * ns.ewma_latency_s
|
||||
|
||||
def record_error(self, node_id: str, reason: str = "unknown") -> None:
|
||||
"""Record a failed invoke for node_id."""
|
||||
with self._lock:
|
||||
ns = self._get_or_create(node_id)
|
||||
ns.invoke_err_total += 1
|
||||
ns.consecutive_failures += 1
|
||||
ns.last_err_ts = time.monotonic()
|
||||
logger.debug(
|
||||
"NodeHealth: node=%s consecutive_failures=%d reason=%s",
|
||||
node_id, ns.consecutive_failures, reason,
|
||||
)
|
||||
|
||||
# ── Public read API ───────────────────────────────────────────────────────
|
||||
|
||||
def state(self, node_id: str) -> str:
|
||||
"""Return current health state for node_id."""
|
||||
with self._lock:
|
||||
return self._state_unlocked(node_id)
|
||||
|
||||
def pick_fallback(
|
||||
self,
|
||||
primary: str,
|
||||
allowed_nodes: FrozenSet[str],
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Return the best alternative node for failover.
|
||||
|
||||
Priority: healthy > degraded > (never down)
|
||||
Returns None if no acceptable fallback exists.
|
||||
"""
|
||||
with self._lock:
|
||||
candidates = sorted(n for n in allowed_nodes if n != primary)
|
||||
# Prefer healthy first
|
||||
for n in candidates:
|
||||
if self._state_unlocked(n) == NODE_STATE_HEALTHY:
|
||||
return n
|
||||
# Accept degraded if no healthy available
|
||||
for n in candidates:
|
||||
if self._state_unlocked(n) == NODE_STATE_DEGRADED:
|
||||
return n
|
||||
# Do not failover to "down" nodes
|
||||
return None
|
||||
|
||||
def as_info_dict(self, node_id: str) -> dict:
|
||||
"""Return a JSON-safe status dict for one node."""
|
||||
with self._lock:
|
||||
ns = self._nodes.get(node_id)
|
||||
if ns is None:
|
||||
return {
|
||||
"node_id": node_id,
|
||||
"state": NODE_STATE_HEALTHY,
|
||||
"invoke_ok": 0,
|
||||
"invoke_err": 0,
|
||||
"consecutive_failures": 0,
|
||||
"ewma_latency_s": None,
|
||||
"last_ok_ts": None,
|
||||
"last_err_ts": None,
|
||||
}
|
||||
return {
|
||||
"node_id": node_id,
|
||||
"state": self._state_unlocked(node_id),
|
||||
"invoke_ok": ns.invoke_ok_total,
|
||||
"invoke_err": ns.invoke_err_total,
|
||||
"consecutive_failures": ns.consecutive_failures,
|
||||
"ewma_latency_s": round(ns.ewma_latency_s, 3) if ns.ewma_latency_s else None,
|
||||
"last_ok_ts": ns.last_ok_ts,
|
||||
"last_err_ts": ns.last_err_ts,
|
||||
}
|
||||
|
||||
def all_info(self, allowed_nodes: Optional[FrozenSet[str]] = None) -> Dict[str, dict]:
|
||||
"""
|
||||
Return status dicts for all tracked (or specified) nodes.
|
||||
If allowed_nodes provided, also include entries for unseen nodes (state=healthy).
|
||||
"""
|
||||
with self._lock:
|
||||
keys = set(self._nodes.keys())
|
||||
if allowed_nodes:
|
||||
keys |= set(allowed_nodes)
|
||||
return {n: self.as_info_dict(n) for n in sorted(keys)}
|
||||
|
||||
def reset(self, node_id: str) -> None:
|
||||
"""Reset health state for a node (e.g. after manual recovery)."""
|
||||
with self._lock:
|
||||
self._nodes.pop(node_id, None)
|
||||
|
||||
def restore_node(
|
||||
self,
|
||||
node_id: str,
|
||||
ewma_latency_s: Optional[float],
|
||||
consecutive_failures: int,
|
||||
) -> None:
|
||||
"""
|
||||
Restore persisted node state after a restart (M8.2).
|
||||
|
||||
Only restores ewma_latency_s and consecutive_failures; counters
|
||||
(invoke_ok_total, invoke_err_total) start from 0 since they are
|
||||
runtime metrics for the current session.
|
||||
"""
|
||||
with self._lock:
|
||||
ns = self._get_or_create(node_id)
|
||||
ns.ewma_latency_s = ewma_latency_s
|
||||
ns.consecutive_failures = max(0, consecutive_failures)
|
||||
|
||||
# ── Internal ──────────────────────────────────────────────────────────────
|
||||
|
||||
def _get_or_create(self, node_id: str) -> _NodeState:
|
||||
if node_id not in self._nodes:
|
||||
self._nodes[node_id] = _NodeState()
|
||||
return self._nodes[node_id]
|
||||
|
||||
def _state_unlocked(self, node_id: str) -> str:
|
||||
ns = self._nodes.get(node_id)
|
||||
if ns is None:
|
||||
return NODE_STATE_HEALTHY # unseen nodes are assumed healthy
|
||||
|
||||
if ns.consecutive_failures >= self._cfg.fail_consecutive:
|
||||
return NODE_STATE_DOWN
|
||||
|
||||
if (
|
||||
ns.ewma_latency_s is not None
|
||||
and ns.ewma_latency_s >= self._cfg.lat_ewma_s
|
||||
):
|
||||
return NODE_STATE_DEGRADED
|
||||
|
||||
return NODE_STATE_HEALTHY
|
||||
|
||||
|
||||
# ── Parser (env vars → NodeHealthConfig) ──────────────────────────────────────
|
||||
|
||||
def parse_node_health_config(
|
||||
fail_consecutive: int = 3,
|
||||
lat_ewma_s: float = 12.0,
|
||||
ewma_alpha: float = 0.3,
|
||||
) -> NodeHealthConfig:
|
||||
"""Construct NodeHealthConfig from parsed env values."""
|
||||
return NodeHealthConfig(
|
||||
fail_consecutive=fail_consecutive,
|
||||
lat_ewma_s=lat_ewma_s,
|
||||
ewma_alpha=ewma_alpha,
|
||||
)
|
||||
179
services/matrix-bridge-dagi/app/node_policy.py
Normal file
179
services/matrix-bridge-dagi/app/node_policy.py
Normal file
@@ -0,0 +1,179 @@
|
||||
"""
|
||||
node_policy — Node-aware routing for matrix-bridge-dagi.
|
||||
|
||||
Resolves which NODA (NODA1, NODA2, …) a message should be tagged with based on:
|
||||
1. Explicit `node=X` kwarg in the message body (mixed rooms only)
|
||||
2. Dynamic store override (PolicyStore, set by operators via !node set) ← M6.0
|
||||
3. Static per-room mapping from BRIDGE_ROOM_NODE_MAP env
|
||||
4. BRIDGE_DEFAULT_NODE (fallback)
|
||||
|
||||
The resolved node_id is embedded in the Router metadata so downstream
|
||||
services (Router / Memory / Agent) can apply per-node policies.
|
||||
|
||||
This module does NOT change the HTTP endpoint called — the Router URL
|
||||
stays the same.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Dict, FrozenSet, Optional, Tuple
|
||||
|
||||
# Regex to find 'node=X' anywhere in message text (case-insensitive)
|
||||
_NODE_KWARG_RE = re.compile(r"\bnode=(\w+)\b", re.IGNORECASE)
|
||||
|
||||
# Node resolution sources (priority order)
|
||||
NODE_SOURCE_EXPLICIT = "explicit"
|
||||
NODE_SOURCE_STORE = "store" # M6.0: dynamic PolicyStore override
|
||||
NODE_SOURCE_ROOM_MAP = "room_map"
|
||||
NODE_SOURCE_DEFAULT = "default"
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class NodeResolution:
|
||||
"""Result of resolving the target node for a message."""
|
||||
node_id: str
|
||||
source: str
|
||||
rejected_node: Optional[str] = None # set when explicit node was not allowlisted
|
||||
|
||||
|
||||
@dataclass
class NodePolicy:
    """
    Node resolution policy.

    Attributes:
        allowed_nodes: Set of valid node names (uppercase).
        default_node: Fallback node when no explicit or room-map match.
        room_node_map: Optional per-room override (room_id → node_id).
    """
    allowed_nodes: FrozenSet[str]
    default_node: str
    room_node_map: Dict[str, str] = field(default_factory=dict)

    def resolve(
        self,
        room_id: str,
        explicit_node: Optional[str] = None,
        store_override: Optional[str] = None,
    ) -> NodeResolution:
        """
        Resolve target node for a message.

        Priority (highest → lowest):
          1. explicit_node kwarg (user-supplied, mixed rooms only)
          2. store_override — dynamic PolicyStore entry (M6.0)
          3. room_node_map — static BRIDGE_ROOM_NODE_MAP env entry
          4. default_node
        """
        # No explicit kwarg: go straight to the fallback chain.
        if explicit_node is None:
            return self._fallback(room_id, store_override)

        requested = explicit_node.upper()
        if requested in self.allowed_nodes:
            return NodeResolution(node_id=requested, source=NODE_SOURCE_EXPLICIT)

        # Requested node is not allowlisted: resolve via the normal chain,
        # but record the rejected value so callers can warn the operator.
        best = self._fallback(room_id, store_override)
        return NodeResolution(
            node_id=best.node_id,
            source=best.source,
            rejected_node=requested,
        )

    def _fallback(
        self,
        room_id: str,
        store_override: Optional[str] = None,
    ) -> NodeResolution:
        """Resolve node without an explicit kwarg (store → env map → default)."""
        # Candidate values in priority order; skip absent or non-allowlisted ones.
        prioritized = (
            (store_override, NODE_SOURCE_STORE),
            (self.room_node_map.get(room_id), NODE_SOURCE_ROOM_MAP),
        )
        for raw, source in prioritized:
            if raw is None:
                continue
            candidate = raw.upper()
            if candidate in self.allowed_nodes:
                return NodeResolution(node_id=candidate, source=source)

        return NodeResolution(node_id=self.default_node, source=NODE_SOURCE_DEFAULT)

    def as_info_dict(self) -> dict:
        """Return a safe dict for health/ops snapshots (no secrets)."""
        return {
            "default_node": self.default_node,
            "allowed_nodes": sorted(self.allowed_nodes),
            "room_overrides": len(self.room_node_map),
        }
|
||||
|
||||
|
||||
def parse_node_policy(
    raw_allowed: str,
    default_node: str,
    raw_room_map: str,
) -> NodePolicy:
    """
    Parse node policy from env-style config strings.

    raw_allowed: "NODA1,NODA2"
    default_node: "NODA1"
    raw_room_map: "!roomA:server=NODA2;!roomB:server=NODA1"
    """
    fallback = default_node.strip().upper() or "NODA1"

    # Normalize the allowlist; the default must always be reachable,
    # even when the configured allowlist is empty or omits it.
    names = {token.strip().upper() for token in raw_allowed.split(",") if token.strip()}
    names.add(fallback)
    allowed: FrozenSet[str] = frozenset(names)

    # Parse "room=NODE" entries; malformed or empty chunks are ignored.
    overrides: Dict[str, str] = {}
    for chunk in raw_room_map.split(";"):
        chunk = chunk.strip()
        if "=" not in chunk:
            continue
        room_part, _, node_part = chunk.partition("=")
        room_key = room_part.strip()
        node_val = node_part.strip().upper()
        if room_key and node_val:
            overrides[room_key] = node_val

    return NodePolicy(
        allowed_nodes=allowed,
        default_node=fallback,
        room_node_map=overrides,
    )
|
||||
|
||||
|
||||
def extract_node_kwarg(text: str) -> Tuple[Optional[str], str]:
    """
    Extract 'node=X' kwarg from message text.

    Returns (node_id_or_None, cleaned_text_without_kwarg).
    Preserves the rest of the message — no other transformations.

    Example:
        "/sofiia node=NODA2 Hello!"
        → ("NODA2", "/sofiia Hello!")
    """
    pattern = re.compile(r"\bnode=(\w+)\b", re.IGNORECASE)
    match = pattern.search(text)
    if match is None:
        # No kwarg present: hand the text back untouched (no normalization).
        return None, text

    node_id = match.group(1).upper()
    # Drop only the first occurrence, then collapse the whitespace gap
    # the removal leaves behind.
    remainder = " ".join(pattern.sub("", text, count=1).split())
    return node_id, remainder
|
||||
|
||||
|
||||
def node_rejected_reply(requested: str, allowed: FrozenSet[str]) -> str:
    """Reply when user requests a node not in the allowlist."""
    # Sorted, backtick-quoted allowlist for a deterministic, readable reply.
    formatted = ", ".join("`{}`".format(name) for name in sorted(allowed))
    lines = [
        f"⚠️ Unknown node: `{requested}`",
        f"Allowed: {formatted}",
        "_Example: `/sofiia node=NODA1 Hello!`_",
    ]
    return "\n".join(lines)
|
||||
1007
services/matrix-bridge-dagi/app/policy_store.py
Normal file
1007
services/matrix-bridge-dagi/app/policy_store.py
Normal file
File diff suppressed because it is too large
Load Diff
149
services/matrix-bridge-dagi/app/sticky_cache.py
Normal file
149
services/matrix-bridge-dagi/app/sticky_cache.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""
|
||||
StickyNodeCache — M8.1: anti-flap sticky routing after soft-failover.
|
||||
|
||||
After a successful failover (primary → fallback), the bridge remembers the
|
||||
fallback node per room:agent pair for `ttl_s` seconds. Subsequent messages
|
||||
for the same pair skip the primary entirely and go directly to the known-good
|
||||
fallback, preventing oscillation ("flapping") while the primary recovers.
|
||||
|
||||
Key design
|
||||
----------
|
||||
key = "{room_id}:{agent_id}"
|
||||
ttl = FAILOVER_STICKY_TTL_S (default 300 s)
|
||||
|
||||
Priority in routing (when source != explicit):
|
||||
1. sticky cache (temporary)
|
||||
2. store override (desired long-term policy)
|
||||
3. env room_node_map
|
||||
4. env default
|
||||
|
||||
Sticky expires naturally; recovery is automatic — no operator action needed.
|
||||
If the sticky node also fails, the entry is removed and normal failover logic
|
||||
takes over again.
|
||||
|
||||
Thread safety
|
||||
-------------
|
||||
Uses threading.RLock — safe to call from asyncio callbacks without to_thread.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional, Tuple
|
||||
|
||||
logger = logging.getLogger(__name__)

# Default sticky lifetime in seconds; overridable per instance and per entry.
_DEFAULT_TTL_S = 300.0


@dataclass
class _StickyEntry:
    """One sticky routing decision: node id plus its monotonic expiry deadline."""
    node_id: str
    expires_at: float  # time.monotonic() deadline


class StickyNodeCache:
    """
    In-memory sticky node preference cache (M8.1 anti-flap).

    After a successful failover, remember the fallback node per
    "room_id:agent_id" key for a bounded TTL so subsequent messages skip
    the (possibly still-degraded) primary. Entries expire naturally.

    Usage:
        cache = StickyNodeCache(ttl_s=300)
        cache.set("!room:srv:sofiia", "NODA2")     # after successful failover
        node = cache.get("!room:srv:sofiia")       # "NODA2" or None
        cache.delete("!room:srv:sofiia")           # if sticky node also fails

    Thread safety: all operations hold an RLock, so the cache is safe to
    use from asyncio callbacks without dispatching to a thread.
    """

    def __init__(self, ttl_s: float = _DEFAULT_TTL_S) -> None:
        # Reject non-positive default TTLs up front — a zero/negative TTL
        # would make every entry dead on arrival.
        if ttl_s <= 0:
            raise ValueError(f"ttl_s must be > 0, got {ttl_s}")
        self._ttl_s = ttl_s
        self._entries: Dict[str, _StickyEntry] = {}
        self._lock = threading.RLock()

    # ── Public API ────────────────────────────────────────────────────────────

    def set(self, key: str, node_id: str, ttl_s: Optional[float] = None) -> None:
        """Set sticky preference; overwrites existing entry."""
        effective_ttl = self._ttl_s if ttl_s is None else ttl_s
        deadline = time.monotonic() + effective_ttl
        with self._lock:
            self._entries[key] = _StickyEntry(node_id=node_id, expires_at=deadline)
            logger.debug("StickyCache.set: key=%s node=%s ttl=%.0fs", key, node_id, effective_ttl)

    def get(self, key: str) -> Optional[str]:
        """
        Return sticky node_id if entry exists and not expired; else None.
        Expired entries are lazily removed on access.
        """
        with self._lock:
            entry = self._entries.get(key)
            if entry is None:
                return None
            if entry.expires_at > time.monotonic():
                return entry.node_id
            # Expired: evict lazily on access.
            del self._entries[key]
            logger.debug("StickyCache.expired: key=%s node=%s", key, entry.node_id)
            return None

    def delete(self, key: str) -> bool:
        """Remove an entry. Returns True if it existed."""
        with self._lock:
            removed = self._entries.pop(key, None)
            if removed is not None:
                logger.debug("StickyCache.delete: key=%s", key)
        return removed is not None

    def active_count(self) -> int:
        """Count of non-expired entries (best-effort; no eviction)."""
        cutoff = time.monotonic()
        with self._lock:
            return len([e for e in self._entries.values() if e.expires_at > cutoff])

    def active_entries(self) -> List[Tuple[str, str, float]]:
        """
        Return (key, node_id, ttl_remaining_s) for all non-expired entries,
        sorted by key. Useful for ops visibility in !status/!nodes.
        """
        cutoff = time.monotonic()
        with self._lock:
            live = [
                (key, entry.node_id, entry.expires_at - cutoff)
                for key, entry in self._entries.items()
                if entry.expires_at - cutoff > 0
            ]
        return sorted(live, key=lambda item: item[0])

    def cleanup(self) -> int:
        """
        Remove all expired entries (call periodically to reclaim memory).
        Returns count of removed entries.
        """
        cutoff = time.monotonic()
        with self._lock:
            stale = [k for k, e in self._entries.items() if e.expires_at <= cutoff]
            for key in stale:
                del self._entries[key]
        if stale:
            logger.debug("StickyCache.cleanup: removed %d expired entries", len(stale))
        return len(stale)

    @property
    def ttl_s(self) -> float:
        # Instance-wide default TTL (read-only).
        return self._ttl_s
|
||||
|
||||
|
||||
def make_sticky_key(room_id: str, agent_id: str) -> str:
    """Canonical sticky cache key for a room+agent pair."""
    return "{}:{}".format(room_id, agent_id)
|
||||
@@ -4,3 +4,4 @@ httpx>=0.25.0
|
||||
python-dotenv>=1.0.0
|
||||
prometheus-client>=0.20.0
|
||||
pyyaml>=6.0
|
||||
aiosqlite>=0.19.0
|
||||
|
||||
Reference in New Issue
Block a user