### Backend (city-service) - Node Registry + Self-Healing API (migration 039) - Improved get_all_nodes() with robust fallback for node_registry/node_cache - Agent Prompts Runtime API for DAGI Router integration - DAGI Router Audit endpoints (phantom/stale detection) - Node Agents API (Guardian/Steward) - Node metrics extended (CPU/GPU/RAM/Disk) ### Frontend (apps/web) - Node Directory with improved error handling - Node Cabinet with metrics cards - DAGI Router Card component - Node Metrics Card component - useDAGIAudit hook ### Scripts - check-invariants.py - deploy verification - node-bootstrap.sh - node self-registration - node-guardian-loop.py - continuous self-healing - dagi_agent_audit.py - DAGI audit utility ### Migrations - 034: Agent prompts seed - 035: Agent DAGI audit - 036: Node metrics extended - 037: Node agents complete - 038: Agent prompts full coverage - 039: Node registry self-healing ### Tests - test_infra_smoke.py - test_agent_prompts_runtime.py - test_dagi_router_api.py ### Documentation - DEPLOY_CHECKLIST_2024_11_30.md - Multiple TASK_PHASE docs
327 lines
11 KiB
Python
327 lines
11 KiB
Python
"""
|
||
Tests for Agent System Prompts Runtime API
|
||
|
||
Тести для Agent System Prompts MVP v2:
|
||
- Runtime prompts API
|
||
- build_system_prompt function
|
||
- Prompts status check API
|
||
"""
|
||
|
||
import asyncio
from typing import Any, Dict, Optional

import pytest

# Mock functions for testing without database
|
||
def build_system_prompt_from_parts(
|
||
prompts: Dict[str, str],
|
||
agent_info: Dict[str, Any] = None,
|
||
context: Dict[str, Any] = None
|
||
) -> str:
|
||
"""Build system prompt from parts (mock implementation for testing)"""
|
||
parts = []
|
||
|
||
# Core prompt (required)
|
||
if prompts.get("core"):
|
||
parts.append(prompts["core"])
|
||
elif agent_info:
|
||
agent_name = agent_info.get("display_name") or agent_info.get("name") or "Agent"
|
||
agent_kind = agent_info.get("kind") or "assistant"
|
||
parts.append(
|
||
f"You are {agent_name}, an AI {agent_kind} in DAARION.city ecosystem. "
|
||
f"Be helpful, accurate, and follow ethical guidelines."
|
||
)
|
||
else:
|
||
parts.append("You are an AI assistant. Be helpful and accurate.")
|
||
|
||
# Governance rules
|
||
if prompts.get("governance"):
|
||
parts.append("\n\n## Governance\n" + prompts["governance"])
|
||
|
||
# Safety guidelines
|
||
if prompts.get("safety"):
|
||
parts.append("\n\n## Safety Guidelines\n" + prompts["safety"])
|
||
|
||
# Tools instructions
|
||
if prompts.get("tools"):
|
||
parts.append("\n\n## Tools & Capabilities\n" + prompts["tools"])
|
||
|
||
# Context additions
|
||
if context:
|
||
context_lines = []
|
||
|
||
if context.get("node"):
|
||
node = context["node"]
|
||
context_lines.append(f"- **Node**: {node.get('name', 'Unknown')}")
|
||
|
||
if context.get("district"):
|
||
district = context["district"]
|
||
context_lines.append(f"- **District**: {district.get('name', 'Unknown')}")
|
||
|
||
if context.get("microdao"):
|
||
microdao = context["microdao"]
|
||
context_lines.append(f"- **MicroDAO**: {microdao.get('name', 'Unknown')}")
|
||
|
||
if context_lines:
|
||
parts.append("\n\n## Current Context\n" + "\n".join(context_lines))
|
||
|
||
return "\n".join(parts)
|
||
|
||
|
||
class TestBuildSystemPrompt:
|
||
"""Tests for build_system_prompt function"""
|
||
|
||
def test_core_only(self):
|
||
"""Test with only core prompt"""
|
||
prompts = {
|
||
"core": "You are DAARWIZZ, the global orchestrator.",
|
||
"safety": None,
|
||
"governance": None,
|
||
"tools": None
|
||
}
|
||
|
||
result = build_system_prompt_from_parts(prompts)
|
||
|
||
assert "DAARWIZZ" in result
|
||
assert "orchestrator" in result
|
||
assert "## Safety" not in result
|
||
assert "## Governance" not in result
|
||
|
||
def test_full_prompts(self):
|
||
"""Test with all prompt types"""
|
||
prompts = {
|
||
"core": "You are DAARWIZZ, the global orchestrator of DAARION.city.",
|
||
"safety": "Never execute irreversible actions without confirmation.",
|
||
"governance": "Coordinate with district leads for resource allocation.",
|
||
"tools": "Use agent_delegate to delegate tasks."
|
||
}
|
||
|
||
result = build_system_prompt_from_parts(prompts)
|
||
|
||
assert "DAARWIZZ" in result
|
||
assert "## Safety Guidelines" in result
|
||
assert "irreversible" in result
|
||
assert "## Governance" in result
|
||
assert "district leads" in result
|
||
assert "## Tools" in result
|
||
assert "agent_delegate" in result
|
||
|
||
def test_fallback_without_core(self):
|
||
"""Test fallback when no core prompt provided"""
|
||
prompts = {
|
||
"core": None,
|
||
"safety": "Be safe",
|
||
"governance": None,
|
||
"tools": None
|
||
}
|
||
agent_info = {
|
||
"name": "TestAgent",
|
||
"display_name": "Test Agent",
|
||
"kind": "coordinator"
|
||
}
|
||
|
||
result = build_system_prompt_from_parts(prompts, agent_info)
|
||
|
||
assert "Test Agent" in result
|
||
assert "coordinator" in result
|
||
assert "## Safety Guidelines" in result
|
||
assert "Be safe" in result
|
||
|
||
def test_with_context(self):
|
||
"""Test prompt with runtime context"""
|
||
prompts = {
|
||
"core": "You are a node agent.",
|
||
"safety": None,
|
||
"governance": None,
|
||
"tools": None
|
||
}
|
||
context = {
|
||
"node": {"name": "NODE1", "environment": "production"},
|
||
"district": {"name": "ENERGYUNION"},
|
||
"microdao": {"name": "DAARION"}
|
||
}
|
||
|
||
result = build_system_prompt_from_parts(prompts, context=context)
|
||
|
||
assert "node agent" in result
|
||
assert "## Current Context" in result
|
||
assert "NODE1" in result
|
||
assert "ENERGYUNION" in result
|
||
assert "DAARION" in result
|
||
|
||
def test_prompt_order(self):
|
||
"""Test that prompts are assembled in correct order"""
|
||
prompts = {
|
||
"core": "CORE_MARKER",
|
||
"safety": "SAFETY_MARKER",
|
||
"governance": "GOVERNANCE_MARKER",
|
||
"tools": "TOOLS_MARKER"
|
||
}
|
||
|
||
result = build_system_prompt_from_parts(prompts)
|
||
|
||
# Check order: core → governance → safety → tools
|
||
core_pos = result.find("CORE_MARKER")
|
||
gov_pos = result.find("GOVERNANCE_MARKER")
|
||
safety_pos = result.find("SAFETY_MARKER")
|
||
tools_pos = result.find("TOOLS_MARKER")
|
||
|
||
assert core_pos < gov_pos < safety_pos < tools_pos
|
||
|
||
|
||
class TestRuntimePromptsFormat:
|
||
"""Tests for runtime prompts response format"""
|
||
|
||
def test_response_structure(self):
|
||
"""Test expected response structure"""
|
||
expected_keys = {"agent_id", "has_prompts", "prompts"}
|
||
|
||
# Mock response
|
||
response = {
|
||
"agent_id": "agent-daarwizz",
|
||
"has_prompts": True,
|
||
"prompts": {
|
||
"core": "You are DAARWIZZ...",
|
||
"safety": "Safety rules...",
|
||
"governance": None,
|
||
"tools": None
|
||
}
|
||
}
|
||
|
||
assert set(response.keys()) == expected_keys
|
||
assert response["has_prompts"] is True
|
||
assert "core" in response["prompts"]
|
||
assert "safety" in response["prompts"]
|
||
assert "governance" in response["prompts"]
|
||
assert "tools" in response["prompts"]
|
||
|
||
def test_has_prompts_when_core_exists(self):
|
||
"""Test has_prompts is True when core exists"""
|
||
prompts = {"core": "Some core prompt", "safety": None, "governance": None, "tools": None}
|
||
has_prompts = prompts.get("core") is not None
|
||
assert has_prompts is True
|
||
|
||
def test_has_prompts_when_core_missing(self):
|
||
"""Test has_prompts is False when core is None"""
|
||
prompts = {"core": None, "safety": "Safety only", "governance": None, "tools": None}
|
||
has_prompts = prompts.get("core") is not None
|
||
assert has_prompts is False
|
||
|
||
|
||
class TestPromptsStatusBatch:
|
||
"""Tests for batch prompts status check"""
|
||
|
||
def test_status_response_format(self):
|
||
"""Test batch status response format"""
|
||
agent_ids = ["agent-daarwizz", "agent-devtools", "agent-unknown"]
|
||
|
||
# Mock response
|
||
response = {
|
||
"status": {
|
||
"agent-daarwizz": True,
|
||
"agent-devtools": True,
|
||
"agent-unknown": False
|
||
}
|
||
}
|
||
|
||
assert "status" in response
|
||
assert isinstance(response["status"], dict)
|
||
assert all(aid in response["status"] for aid in agent_ids)
|
||
assert all(isinstance(v, bool) for v in response["status"].values())
|
||
|
||
|
||
class TestNodeAgentPrompts:
|
||
"""Tests for Node Agent specific prompts"""
|
||
|
||
def test_node_guardian_prompt_content(self):
|
||
"""Test Node Guardian has appropriate content markers"""
|
||
guardian_core = """Ти — Node Guardian для НОДА1 (Hetzner GEX44 Production).
|
||
Твоя місія: забезпечувати стабільну роботу продакшн-інфраструктури DAARION.city."""
|
||
|
||
assert "Node Guardian" in guardian_core
|
||
assert "НОДА1" in guardian_core
|
||
assert "Production" in guardian_core or "production" in guardian_core.lower()
|
||
|
||
def test_node_guardian_safety_rules(self):
|
||
"""Test Node Guardian safety rules"""
|
||
guardian_safety = """Ніколи не виконуй деструктивні команди без підтвердження.
|
||
Не розкривай чутливу інформацію (паролі, API ключі).
|
||
При невизначеності — ескалюй до людини."""
|
||
|
||
assert "деструктивні" in guardian_safety
|
||
assert "підтвердження" in guardian_safety
|
||
assert "ескалюй" in guardian_safety
|
||
|
||
|
||
class TestAgentCoverage:
|
||
"""Tests for agent prompts coverage requirements"""
|
||
|
||
REQUIRED_AGENTS = [
|
||
# City / Core
|
||
"agent-daarwizz",
|
||
"agent-microdao-orchestrator",
|
||
"agent-devtools",
|
||
# District / MicroDAO
|
||
"agent-greenfood",
|
||
"agent-helion",
|
||
"agent-soul",
|
||
"agent-druid",
|
||
"agent-nutra",
|
||
"agent-eonarch",
|
||
"agent-clan",
|
||
"agent-yaromir",
|
||
"agent-monitor",
|
||
# Node Agents
|
||
"monitor-node1",
|
||
"monitor-node2",
|
||
"node-steward-node1",
|
||
"node-steward-node2"
|
||
]
|
||
|
||
def test_required_agents_list(self):
|
||
"""Test required agents are defined"""
|
||
assert len(self.REQUIRED_AGENTS) == 16
|
||
assert "agent-daarwizz" in self.REQUIRED_AGENTS
|
||
assert "monitor-node1" in self.REQUIRED_AGENTS
|
||
assert "monitor-node2" in self.REQUIRED_AGENTS
|
||
|
||
|
||
# Integration tests (require running services)
|
||
class TestIntegration:
|
||
"""Integration tests - skip if services not available"""
|
||
|
||
@pytest.mark.skip(reason="Requires running services")
|
||
async def test_fetch_runtime_prompts(self):
|
||
"""Test fetching runtime prompts from API"""
|
||
import httpx
|
||
|
||
async with httpx.AsyncClient() as client:
|
||
response = await client.get(
|
||
"http://localhost:7001/internal/agents/agent-daarwizz/prompts/runtime"
|
||
)
|
||
|
||
assert response.status_code == 200
|
||
data = response.json()
|
||
assert data["agent_id"] == "agent-daarwizz"
|
||
assert "prompts" in data
|
||
|
||
@pytest.mark.skip(reason="Requires running services")
|
||
async def test_fetch_system_prompt(self):
|
||
"""Test fetching full system prompt from API"""
|
||
import httpx
|
||
|
||
async with httpx.AsyncClient() as client:
|
||
response = await client.get(
|
||
"http://localhost:7001/internal/agents/agent-daarwizz/system-prompt"
|
||
)
|
||
|
||
assert response.status_code == 200
|
||
data = response.json()
|
||
assert data["agent_id"] == "agent-daarwizz"
|
||
assert "system_prompt" in data
|
||
assert len(data["system_prompt"]) > 100
|
||
|
||
|
||
if __name__ == "__main__":
|
||
pytest.main([__file__, "-v"])
|
||
|