feat(platform): add new services, tools, tests and crews modules
New router intelligence modules (26 files): alert_ingest/store, audit_store, architecture_pressure, backlog_generator/store, cost_analyzer, data_governance, dependency_scanner, drift_analyzer, incident_* (5 files), llm_enrichment, platform_priority_digest, provider_budget, release_check_runner, risk_* (6 files), signature_state_store, sofiia_auto_router, tool_governance New services: - sofiia-console: Dockerfile, adapters/, monitor/nodes/ops/voice modules, launchd, react static - memory-service: integration_endpoints, integrations, voice_endpoints, static UI - aurora-service: full app suite (analysis, job_store, orchestrator, reporting, schemas, subagents) - sofiia-supervisor: new supervisor service - aistalk-bridge-lite: Telegram bridge lite - calendar-service: CalDAV calendar service with reminders - mlx-stt-service / mlx-tts-service: Apple Silicon speech services - binance-bot-monitor: market monitor service - node-worker: STT/TTS memory providers New tools (9): agent_email, browser_tool, contract_tool, observability_tool, oncall_tool, pr_reviewer_tool, repo_tool, safe_code_executor, secure_vault New crews: agromatrix_crew (10 modules: depth_classifier, doc_facts, doc_focus, farm_state, light_reply, llm_factory, memory_manager, proactivity, reflection_engine, session_context, style_adapter, telemetry) Tests: 85+ test files for all new modules Made-with: Cursor
This commit is contained in:
0
tools/pr_reviewer_tool/tests/__init__.py
Normal file
0
tools/pr_reviewer_tool/tests/__init__.py
Normal file
305
tools/pr_reviewer_tool/tests/test_pr_reviewer.py
Normal file
305
tools/pr_reviewer_tool/tests/test_pr_reviewer.py
Normal file
@@ -0,0 +1,305 @@
|
||||
"""
|
||||
Tests for PR Reviewer Tool
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
|
||||
|
||||
from services.router.tool_manager import ToolManager, ToolResult
|
||||
|
||||
|
||||
class TestPRReviewerToolSecurity:
    """Security-limit checks for the PR Reviewer tool: oversized diffs and
    diffs touching too many files must both be rejected up front."""

    @pytest.mark.asyncio
    async def test_diff_too_large_rejected(self):
        """A diff longer than ``max_chars`` is refused with a "too large" error."""
        manager = ToolManager({})

        # 500KB payload against a 400KB limit.
        oversized = "x" * 500000

        outcome = await manager._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": oversized, "max_chars": 400000},
        })

        assert outcome.success is False
        assert "too large" in outcome.error.lower()

    @pytest.mark.asyncio
    async def test_too_many_files_rejected(self):
        """A diff touching more files than ``max_files`` is refused."""
        manager = ToolManager({})

        # 201 single-hunk file diffs against a 200-file limit.
        chunks = [
            f"""diff --git a/file{i}.py b/file{i}.py
--- a/file{i}.py
+++ b/file{i}.py
@@ -1,1 +1,2 @@
-old
+new
"""
            for i in range(201)
        ]

        outcome = await manager._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": "".join(chunks), "max_files": 200},
        })

        assert outcome.success is False
        assert "too many files" in outcome.error.lower()
|
||||
|
||||
|
||||
class TestPRReviewerToolBlocking:
    """Blocking-issue detection: secrets, RCE, SQL injection and auth-bypass
    patterns introduced by a diff must surface as blocking issues."""

    @pytest.mark.asyncio
    async def test_secrets_detected(self):
        """Secrets in a diff are flagged, and the reported evidence is masked
        so the secret value itself never appears in the tool output."""
        tool_mgr = ToolManager({})

        diff = """diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -1,2 +1,3 @@
# config
-API_KEY = "sk-live-abc123def456"
+API_KEY = "sk-live-xyz789"
"""

        result = await tool_mgr._pr_reviewer_tool({
            "mode": "blocking_only",
            "diff": {"text": diff},
        })

        assert result.success is True
        assert len(result.result["blocking_issues"]) > 0
        # Evidence should be masked: the raw key prefix must not leak through.
        for issue in result.result["blocking_issues"]:
            assert "sk-live" not in issue.get("evidence", "")

    @pytest.mark.asyncio
    async def test_rce_detected(self):
        """An added shell-execution call is reported as an RCE blocking issue."""
        tool_mgr = ToolManager({})

        diff = """diff --git a/app.py b/app.py
--- a/app.py
+++ b/app.py
@@ -1,3 +1,4 @@
import os
+os.system("rm -rf /")
"""

        result = await tool_mgr._pr_reviewer_tool({
            "mode": "blocking_only",
            "diff": {"text": diff},
        })

        assert result.success is True
        blocking = result.result["blocking_issues"]
        assert any("RCE" in i.get("title", "") for i in blocking)

    @pytest.mark.asyncio
    async def test_sql_injection_detected(self):
        """String-concatenated SQL added by a diff is reported as SQL injection."""
        tool_mgr = ToolManager({})

        # BUGFIX: the injected query must be an ADDED line ("+" prefix) —
        # without it the line is not part of the change and a reviewer that
        # scans additions would never see it, making this test vacuous.
        diff = """diff --git a/db.py b/db.py
--- a/db.py
+++ b/db.py
@@ -1,3 +1,4 @@
+query = "SELECT * FROM users WHERE id = " + user_input
"""

        result = await tool_mgr._pr_reviewer_tool({
            "mode": "blocking_only",
            "diff": {"text": diff},
        })

        assert result.success is True
        blocking = result.result["blocking_issues"]
        assert any("SQL" in i.get("title", "") for i in blocking)

    @pytest.mark.asyncio
    async def test_auth_bypass_detected(self):
        """An added environment-gated auth short-circuit is reported as an
        AUTH or SECURITY blocking issue."""
        tool_mgr = ToolManager({})

        diff = """diff --git a/auth.py b/auth.py
--- a/auth.py
+++ b/auth.py
@@ -1,3 +1,4 @@
+# Skip auth for testing
+if os.getenv("TESTING"):
+ return True
"""

        result = await tool_mgr._pr_reviewer_tool({
            "mode": "blocking_only",
            "diff": {"text": diff},
        })

        assert result.success is True
        blocking = result.result["blocking_issues"]
        assert any("AUTH" in i.get("title", "") or "SECURITY" in i.get("title", "") for i in blocking)
|
||||
|
||||
|
||||
class TestPRReviewerToolModes:
    """Review-mode behaviour: ``blocking_only`` vs ``full_review`` output."""

    @pytest.mark.asyncio
    async def test_blocking_only_mode(self):
        """``blocking_only`` succeeds on a benign diff and still exposes the
        issue list and summary keys in its result."""
        mgr = ToolManager({})

        sample = """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
+print("debug")
+# TODO: fix this
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "blocking_only",
            "diff": {"text": sample},
        })

        assert res.success is True
        # Non-critical additions: the issue list is present (may be empty)...
        assert "issues" in res.result
        # ...and a summary is always produced.
        assert "summary" in res.result

    @pytest.mark.asyncio
    async def test_full_review_mode(self):
        """``full_review`` reports non-blocking findings (debug print,
        bare ``except: pass``) as issues."""
        mgr = ToolManager({})

        sample = """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
+print("debug")
+except: pass
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": sample},
        })

        assert res.success is True
        # Full review surfaces at least one finding for this diff.
        assert len(res.result["issues"]) > 0
|
||||
|
||||
|
||||
class TestPRReviewerToolScoring:
    """Scoring behaviour: risk rises with findings, security stays high
    when the diff is clean."""

    @pytest.mark.asyncio
    async def test_risk_scoring(self):
        """A diff that introduces a secret yields a positive risk score."""
        mgr = ToolManager({})

        risky_diff = """diff --git a/config.py b/config.py
--- a/config.py
+++ b/config.py
@@ -1,2 +1,3 @@
+API_KEY = "sk-live-secret123"
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": risky_diff},
        })

        assert res.success is True
        assert "score" in res.result
        # A committed secret must move the risk score above zero.
        assert res.result["score"]["risk"] > 0

    @pytest.mark.asyncio
    async def test_security_scoring(self):
        """A clean diff with no findings keeps the security score high."""
        mgr = ToolManager({})

        clean_diff = """diff --git a/good.py b/good.py
--- a/good.py
+++ b/good.py
@@ -1,2 +1,3 @@
+def hello():
+ return "world"
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": clean_diff},
        })

        assert res.success is True
        # No findings: security score should remain at or above 70.
        assert res.result["score"]["security"] >= 70
|
||||
|
||||
|
||||
class TestPRReviewerToolChecklists:
    """Checklist generation: tests and deploy checklists are produced when
    the corresponding options are enabled."""

    @pytest.mark.asyncio
    async def test_tests_checklist(self):
        """Adding a new file with ``include_tests_checklist`` enabled yields
        a non-empty tests checklist."""
        mgr = ToolManager({})

        new_file_diff = """diff --git a/new.py b/new.py
--- /dev/null
+++ b/new.py
@@ -0,0 +1,3 @@
+def new_func():
+ pass
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": new_file_diff},
            "options": {"include_tests_checklist": True},
        })

        assert res.success is True
        assert len(res.result["tests_checklist"]) > 0

    @pytest.mark.asyncio
    async def test_deploy_checklist(self):
        """Any change with ``include_deploy_risks`` enabled yields a
        non-empty deploy checklist."""
        mgr = ToolManager({})

        change_diff = """diff --git a/app.py b/app.py
--- a/app.py
+++ b/app.py
@@ -1,2 +1,3 @@
+print("hello")
"""

        res = await mgr._pr_reviewer_tool({
            "mode": "full_review",
            "diff": {"text": change_diff},
            "options": {"include_deploy_risks": True},
        })

        assert res.success is True
        assert len(res.result["deploy_checklist"]) > 0
|
||||
|
||||
|
||||
# Allow running this test module directly (`python test_pr_reviewer.py`)
# without invoking pytest from the command line.
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
|
||||
Reference in New Issue
Block a user