feat(sofiia-console): add auto-evidence and post-review generation from runbook runs

- adds runbook_artifacts.py: server-side render of release_evidence.md and
  post_review.md from DB step results (no shell); saves to
  SOFIIA_DATA_DIR/release_artifacts/<run_id>/
- evidence: auto-fills preflight/smoke/script outcomes, step table, timestamps
- post_review: auto-fills metadata, smoke results, incidents from step statuses;
  leaves [TODO] markers for manual observation sections
- adds POST /api/runbooks/runs/{run_id}/evidence and /post_review endpoints
- updates runbook_runs.evidence_path in DB after render
- adds 11 tests covering file creation, key sections, TODO markers, 404s, API

Made-with: Cursor
This commit is contained in:
Apple
2026-03-03 05:07:52 -08:00
parent 0603184524
commit 8879da1e7f
3 changed files with 665 additions and 0 deletions

View File

@@ -0,0 +1,388 @@
"""
Runbook artifacts renderer — PR4.
Generates two markdown artifacts from runbook run DB data (no shell required):
- release_evidence.md (aligned with docs/runbook/release-evidence-template.md)
- post_review.md (aligned with docs/release/sofiia-console-post-release-review-template.md)
Output path: ${SOFIIA_DATA_DIR}/release_artifacts/<run_id>/
"""
from __future__ import annotations
import json
import logging
import os
import time
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
from . import db as _db
logger = logging.getLogger(__name__)
def _artifacts_dir(run_id: str) -> Path:
"""${SOFIIA_DATA_DIR}/release_artifacts/<run_id>/"""
data_dir = os.getenv("SOFIIA_DATA_DIR", "/tmp/sofiia-data")
return Path(data_dir) / "release_artifacts" / run_id
def _iso_utc(ts: Optional[float]) -> str:
if not ts:
return ""
return datetime.fromtimestamp(ts, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S UTC")
def _duration_str(started: Optional[float], finished: Optional[float]) -> str:
if not started or not finished:
return "?"
secs = finished - started
if secs < 60:
return f"{secs:.1f}s"
return f"{secs / 60:.1f}min"
async def _load_run(run_id: str) -> Optional[Dict[str, Any]]:
    """Fetch one runbook_runs row as a dict keyed by column name, or None if absent."""
    columns = (
        "run_id", "runbook_path", "status", "current_step", "created_at",
        "started_at", "finished_at", "operator_id", "node_id", "sofiia_url",
        "evidence_path",
    )
    conn = await _db.get_db()
    query = (
        "SELECT run_id, runbook_path, status, current_step, created_at, started_at, "
        "finished_at, operator_id, node_id, sofiia_url, evidence_path "
        "FROM runbook_runs WHERE run_id = ?"
    )
    async with conn.execute(query, (run_id,)) as cur:
        row = await cur.fetchone()
    if not row:
        return None
    # Column order above mirrors the SELECT list, so zip pairs them correctly.
    return dict(zip(columns, row))
async def _load_steps(run_id: str) -> List[Dict[str, Any]]:
    """Fetch all runbook_steps rows for a run, ordered by step_index, JSON columns decoded."""
    conn = await _db.get_db()
    query = (
        "SELECT step_index, title, section, action_type, action_json, status, "
        "result_json, started_at, finished_at "
        "FROM runbook_steps WHERE run_id = ? ORDER BY step_index"
    )
    async with conn.execute(query, (run_id,)) as cur:
        rows = await cur.fetchall()
    steps: List[Dict[str, Any]] = []
    for r in rows or []:
        steps.append(
            {
                "step_index": r[0],
                "title": r[1],
                "section": r[2],
                "action_type": r[3],
                # JSON columns may be NULL/empty; decode to {} in that case.
                "action_json": json.loads(r[4]) if r[4] else {},
                "status": r[5],
                "result": json.loads(r[6]) if r[6] else {},
                "started_at": r[7],
                "finished_at": r[8],
            }
        )
    return steps
def _step_status_icon(status: str) -> str:
    """Map a step status to a display icon used in the markdown summaries.

    NOTE(review): "ok", "fail", and "pending" map to empty strings while
    "warn"/"skipped" carry emoji — these may be characters stripped in
    transit (e.g. a check mark / cross); confirm against the template.
    Unknown statuses fall back to "".
    """
    return {"ok": "", "warn": "⚠️", "fail": "", "skipped": "⏭️", "pending": ""}.get(status, "")
def _format_result_line(step: Dict[str, Any]) -> str:
    """One-line markdown summary of a step's result, keyed on action_type."""
    result = step.get("result") or {}
    status = step.get("status", "pending")
    kind = step.get("action_type", "")
    icon = _step_status_icon(status)
    if kind == "http_check":
        expected = result.get("expected", [])
        code = result.get("status_code", "?")
        verdict = "ok" if result.get("ok") else "FAIL"
        return f"{icon} HTTP {result.get('method','GET')} → `{code}` (expected: {expected}) — {verdict}"
    if kind == "script":
        extras = ""
        if result.get("warning", "") == "running_as_root":
            extras += " ⚠️ running_as_root"
        if result.get("timeout", False):
            extras += " ⏰ TIMEOUT"
        return f"{icon} exit_code={result.get('exit_code', '?')}{extras}"
    # Anything else is treated as a manual step; notes truncated to 80 chars.
    notes = (result.get("notes") or "")[:80]
    return f"{icon} manual — {notes or status}"
# ── Release Evidence ──────────────────────────────────────────────────────────
def _render_release_evidence(run: Dict[str, Any], steps: List[Dict[str, Any]]) -> str:
    """Render release evidence markdown from run + step results.

    Builds the complete release_evidence.md body (sections 1-8) as a single
    newline-joined string. Well-known steps are located heuristically and
    summarized; every step additionally appears in the section-6 table.

    Args:
        run: Row dict from _load_run (requires run_id, runbook_path, status).
        steps: Ordered step dicts from _load_steps.

    Returns:
        The full markdown document as one string.
    """
    run_id = run["run_id"]
    operator = run.get("operator_id") or ""
    node_id = run.get("node_id") or "NODA2"  # fallback target node label
    started = run.get("started_at")
    finished = run.get("finished_at")
    duration = _duration_str(started, finished)
    # Classify steps by action_type and name: each well-known step is matched by
    # action_type plus a substring of its URL path / script name; None when the
    # run did not include it.
    health_step = next((s for s in steps if s["action_type"] == "http_check" and "/api/health" in str(s.get("action_json", {}).get("url_path", ""))), None)
    metrics_step = next((s for s in steps if s["action_type"] == "http_check" and "/metrics" in str(s.get("action_json", {}).get("url_path", ""))), None)
    audit_step = next((s for s in steps if s["action_type"] == "http_check" and "/api/audit" in str(s.get("action_json", {}).get("url_path", ""))), None)
    preflight_step = next((s for s in steps if s["action_type"] == "script" and "preflight" in str(s.get("action_json", {}).get("script", ""))), None)
    smoke_step = next((s for s in steps if s["action_type"] == "script" and "idempotency" in str(s.get("action_json", {}).get("script", ""))), None)
    evidence_step = next((s for s in steps if s["action_type"] == "script" and "evidence" in str(s.get("action_json", {}).get("script", ""))), None)

    def _step_val(s: Optional[Dict], fallback: str = "") -> str:
        # One-line summary for a step, or `fallback` when the step is absent.
        if not s:
            return fallback
        return _format_result_line(s)

    # "warn" still counts as PASS for preflight; "not run" when no step matched.
    preflight_outcome = "PASS" if (preflight_step and preflight_step.get("status") in ("ok", "warn")) else ("FAIL" if preflight_step else "not run")
    preflight_warns = ""
    if preflight_step and preflight_step.get("result", {}).get("warning"):
        preflight_warns = f" - {preflight_step['result']['warning']}"
    lines = [
        f"# Release Evidence — Sofiia Console",
        f"",
        f"## 1) Release metadata",
        f"",
        f"- Release ID: `{run_id}`",
        f"- Date/Time UTC: {_iso_utc(started)}",
        f"- Runbook: `{run['runbook_path']}`",
        f"- Operator: `{operator}`",
        f"- Target node: `{node_id}`",
        f"- Run status: `{run['status']}`",
        f"- Duration: {duration}",
        f"- Change summary:",
        f" - _Generated from runbook run `{run_id}`_",
        f"",
        f"## 2) Preflight results",
        f"",
        f"- Command: `STRICT=1 bash ops/preflight_sofiia_console.sh`",
        f"- Status: `{preflight_outcome.upper()}`",
        f"- WARN summary: {preflight_warns or ''}",
        f"- Step detail: {_step_val(preflight_step)}",
        f"",
        f"## 3) Deploy steps performed",
        f"",
        f"- {node_id} precheck: `OK`",
        f" - Notes: controlled restart via runbook runner",
        f"- Rollout method: manual (guided runbook)",
        f"",
        f"## 4) Smoke evidence",
        f"",
        f"- `GET /api/health`: {_step_val(health_step)}",
        f"- `GET /metrics`: {_step_val(metrics_step)}",
        f"- Idempotency A/B smoke: {_step_val(smoke_step)}",
        f"- `/api/audit` auth check: {_step_val(audit_step)}",
        f"",
        f"## 5) Post-release checks",
        f"",
        f"- Evidence generated: {_step_val(evidence_step)}",
        f"- Audit write/read quick check: _manual observation required_",
        f"- Retention dry-run: _run manually if needed_",
        f"",
        f"## 6) All steps summary",
        f"",
        f"| # | Title | Type | Status | Duration |",
        f"|---|-------|------|--------|----------|",
    ]
    # Section 6 body: one table row per step, title truncated to 50 chars.
    for s in steps:
        icon = _step_status_icon(s.get("status", "pending"))
        dur = _duration_str(s.get("started_at"), s.get("finished_at"))
        lines.append(f"| {s['step_index']} | {s['title'][:50]} | `{s['action_type']}` | {icon} `{s['status']}` | {dur} |")
    lines += [
        f"",
        f"## 7) Rollback plan & outcome",
        f"",
        f"- Rollback needed: `no`",
        f"- Final service state: `{run['status']}`",
        f"",
        f"## 8) Sign-off",
        f"",
        f"- Generated by: sofiia-console runbook runner",
        f"- Timestamp UTC: {_iso_utc(time.time())}",
        f"- Run ID: `{run_id}`",
        f"",
    ]
    return "\n".join(lines)
# ── Post-Release Review ───────────────────────────────────────────────────────
def _render_post_review(run: Dict[str, Any], steps: List[Dict[str, Any]]) -> str:
    """Render post-release review markdown, auto-filling from run data.

    Auto-fills metadata, preflight/smoke outcomes, and incidents (any step
    with status warn/fail); leaves [TODO] markers for sections that need
    manual observation.

    Args:
        run: Row dict from _load_run (requires run_id and runbook_path).
        steps: Ordered step dicts from _load_steps.

    Returns:
        The full markdown document as one string.
    """
    run_id = run["run_id"]
    operator = run.get("operator_id") or ""
    node_id = run.get("node_id") or "NODA2"  # fallback target node label
    started = run.get("started_at")
    finished = run.get("finished_at")
    # Same heuristic step matching as _render_release_evidence.
    preflight_step = next((s for s in steps if s["action_type"] == "script" and "preflight" in str(s.get("action_json", {}).get("script", ""))), None)
    smoke_step = next((s for s in steps if s["action_type"] == "script" and "idempotency" in str(s.get("action_json", {}).get("script", ""))), None)
    health_step = next((s for s in steps if s["action_type"] == "http_check" and "/api/health" in str(s.get("action_json", {}).get("url_path", ""))), None)
    metrics_step = next((s for s in steps if s["action_type"] == "http_check" and "/metrics" in str(s.get("action_json", {}).get("url_path", ""))), None)
    audit_step = next((s for s in steps if s["action_type"] == "http_check" and "/api/audit" in str(s.get("action_json", {}).get("url_path", ""))), None)
    # "warn" still counts as PASS for preflight; "not run" when no step matched.
    preflight_outcome = "PASS" if (preflight_step and preflight_step.get("status") in ("ok", "warn")) else ("FAIL" if preflight_step else "not run")
    preflight_warn_items = " - —"
    if preflight_step and preflight_step.get("result", {}).get("warning"):
        preflight_warn_items = f" - {preflight_step['result']['warning']}"

    def _smoke_result(s: Optional[Dict]) -> str:
        # Compact pass/fail summary for a smoke step; "not run" when absent.
        if not s:
            return "not run"
        r = s.get("result", {})
        if s["action_type"] == "http_check":
            code = r.get("status_code", "?")
            ok = r.get("ok")
            return f"`{code}` — {'OK' if ok else 'FAIL'}"
        exit_code = r.get("exit_code", "?")
        return f"exit_code={exit_code} ({'PASS' if exit_code == 0 else 'FAIL'})"

    # Any step that ended warn/fail is reported under "Incidents / Anomalies".
    warnings_in_run = [s for s in steps if s.get("status") in ("warn", "fail")]
    incidents_section = "- What happened?: —" if not warnings_in_run else "\n".join(
        f"- Step `{s['step_index']}` ({s['title'][:40]}): status=`{s['status']}`"
        for s in warnings_in_run
    )
    lines = [
        f"# Sofiia Console Post-Release Review",
        f"",
        f"_Auto-generated from runbook run `{run_id}`. Fill in sections marked [TODO]._",
        f"",
        f"## 1) Release Metadata",
        f"",
        # NOTE(review): start/finish timestamps are concatenated with no
        # separator — possibly a stripped dash/arrow; confirm intended output.
        f"- Date / Time window: {_iso_utc(started)}{_iso_utc(finished)}",
        f"- Target nodes: `{node_id}`",
        f"- Runbook: `{run['runbook_path']}`",
        f"- Run ID: `{run_id}`",
        f"- Operator(s): `{operator}`",
        f"- Deployed SHAs:",
        f" - sofiia-console: [TODO]",
        f" - router: [TODO]",
        f" - gateway: [TODO]",
        f" - memory-service: [TODO]",
        f"",
        f"## 2) Preflight Outcome",
        f"",
        f"- STRICT mode result: `{preflight_outcome}`",
        f"- WARN items worth noting:",
        f"{preflight_warn_items}",
        f"",
        f"## 3) Smoke Results",
        f"",
        f"- `/api/health`: {_smoke_result(health_step)}",
        f"- `/metrics`: {_smoke_result(metrics_step)}",
        f"- Redis idempotency A/B: {_smoke_result(smoke_step)}",
        f"- `/api/audit` auth check (401/200): {_smoke_result(audit_step)}",
        f"- Audit write/read quick test: [TODO — manual check]",
        f"",
        f"## 4) Observed Metrics (first 15-30 min)",
        f"",
        f"- 5xx count: [TODO]",
        f"- `sofiia_rate_limited_total` (chat / operator): [TODO]",
        f"- `sofiia_idempotency_replays_total`: [TODO]",
        f"- Unexpected spikes?: [TODO]",
        f"",
        f"## 5) Incidents / Anomalies",
        f"",
        incidents_section,
        f"- Root cause (if known): —",
        f"- Mitigation applied: —",
        f"- Rollback needed: `no`",
        f"",
        f"## 6) What Went Well",
        f"",
        f"- [TODO]",
        f"",
        f"## 7) What Was Friction",
        f"",
        f"- Manual steps: [TODO]",
        f"- Confusing logs/output: [TODO]",
        f"- Missing visibility: [TODO]",
        f"",
        f"## 8) Action Items",
        f"",
        f"- [ ] [TODO]",
        f"",
        f"---",
        f"_Generated by sofiia-console runbook runner at {_iso_utc(time.time())}_",
        f"",
    ]
    return "\n".join(lines)
# ── Public functions ──────────────────────────────────────────────────────────
async def render_release_evidence(run_id: str) -> Dict[str, Any]:
    """
    Generate release evidence markdown from run DB data.

    Writes ${SOFIIA_DATA_DIR}/release_artifacts/<run_id>/release_evidence.md,
    records the path in runbook_runs.evidence_path, and returns
    {evidence_path, bytes, created_at, run_id}.

    Raises ValueError when run_id does not exist.
    """
    run = await _load_run(run_id)
    if run is None:
        raise ValueError(f"Run not found: {run_id}")
    steps = await _load_steps(run_id)
    target_dir = _artifacts_dir(run_id)
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / "release_evidence.md"
    markdown = _render_release_evidence(run, steps)
    target.write_text(markdown, encoding="utf-8")
    # Persist the artifact location on the run row.
    conn = await _db.get_db()
    await conn.execute(
        "UPDATE runbook_runs SET evidence_path = ? WHERE run_id = ?",
        (str(target), run_id),
    )
    await conn.commit()
    logger.info("Release evidence written: %s (%d bytes)", target, len(markdown))
    return {
        "evidence_path": str(target),
        "bytes": len(markdown),
        "created_at": _iso_utc(time.time()),
        "run_id": run_id,
    }
async def render_post_review(run_id: str) -> Dict[str, Any]:
    """
    Generate post-release review markdown from run DB data.

    Writes ${SOFIIA_DATA_DIR}/release_artifacts/<run_id>/post_review.md and
    returns {path, bytes, created_at, run_id}.

    Raises ValueError when run_id does not exist.
    """
    run = await _load_run(run_id)
    if run is None:
        raise ValueError(f"Run not found: {run_id}")
    steps = await _load_steps(run_id)
    target_dir = _artifacts_dir(run_id)
    target_dir.mkdir(parents=True, exist_ok=True)
    target = target_dir / "post_review.md"
    markdown = _render_post_review(run, steps)
    target.write_text(markdown, encoding="utf-8")
    logger.info("Post-review written: %s (%d bytes)", target, len(markdown))
    return {
        "path": str(target),
        "bytes": len(markdown),
        "created_at": _iso_utc(time.time()),
        "run_id": run_id,
    }

View File

@@ -1,5 +1,6 @@
""" """
Runbook runs API — create run, get run, next step, complete step, abort (PR2). Runbook runs API — create run, get run, next step, complete step, abort (PR2).
Evidence + post-review generation (PR4).
All under require_auth. All under require_auth.
""" """
from __future__ import annotations from __future__ import annotations
@@ -10,6 +11,7 @@ from fastapi import APIRouter, Depends, HTTPException, Body
from .auth import require_auth from .auth import require_auth
from . import runbook_runner as runner from . import runbook_runner as runner
from . import runbook_artifacts as artifacts
runbook_runs_router = APIRouter(prefix="/api/runbooks/runs", tags=["runbook-runs"]) runbook_runs_router = APIRouter(prefix="/api/runbooks/runs", tags=["runbook-runs"])
@@ -80,6 +82,32 @@ async def complete_step(
return {"ok": True, "run_id": run_id, "step_index": step_index, "next_step": step_index + 1} return {"ok": True, "run_id": run_id, "step_index": step_index, "next_step": step_index + 1}
@runbook_runs_router.post("/{run_id}/evidence")
async def generate_evidence(
    run_id: str,
    _auth: str = Depends(require_auth),
):
    """Generate release evidence markdown from run step results.

    Returns {evidence_path, bytes, created_at} from the renderer.
    Responds 404 when the run_id is unknown (renderer raises ValueError).
    """
    try:
        out = await artifacts.render_release_evidence(run_id)
    except ValueError as e:
        # Chain the cause (PEP 3134) so logs show the original ValueError
        # instead of "During handling of the above exception...".
        raise HTTPException(status_code=404, detail=str(e)) from e
    return out
@runbook_runs_router.post("/{run_id}/post_review")
async def generate_post_review(
    run_id: str,
    _auth: str = Depends(require_auth),
):
    """Generate post-release review markdown.

    Returns {path, bytes, created_at} from the renderer.
    Responds 404 when the run_id is unknown (renderer raises ValueError).
    """
    try:
        out = await artifacts.render_post_review(run_id)
    except ValueError as e:
        # Chain the cause (PEP 3134) so the original error is preserved in logs.
        raise HTTPException(status_code=404, detail=str(e)) from e
    return out
@runbook_runs_router.post("/{run_id}/abort") @runbook_runs_router.post("/{run_id}/abort")
async def abort_run( async def abort_run(
run_id: str, run_id: str,

View File

@@ -0,0 +1,249 @@
"""
Tests for runbook artifacts renderer (PR4):
- render_release_evidence: creates file, contains key sections
- render_post_review: creates file, contains TODO markers + auto-filled fields
- API endpoints: POST /evidence, /post_review return 200 with path/bytes
"""
from __future__ import annotations
import asyncio
import json
import time
from pathlib import Path
from unittest.mock import AsyncMock, patch
import pytest
# ── Fixtures ──────────────────────────────────────────────────────────────────
@pytest.fixture
def tmp_rehearsal_docs(tmp_path):
    """Build tmp_path/docs/runbook holding a minimal rehearsal checklist; return docs root."""
    docs_root = tmp_path / "docs"
    checklist_dir = docs_root / "runbook"
    checklist_dir.mkdir(parents=True)
    checklist_body = (
        "# Rehearsal v1 — 30-minute execution plan\n\n"
        "## 05 min — Preflight\n\nRun preflight.\n\n"
        "## 510 min — Restart\n\nRestart service."
    )
    (checklist_dir / "rehearsal-v1-30min-checklist.md").write_text(checklist_body, encoding="utf-8")
    return docs_root
def _fake_http_proc():
"""Fake subprocess for script steps: exit_code=0, stdout=PASS."""
from unittest.mock import MagicMock
proc = MagicMock()
proc.returncode = 0
proc.kill = MagicMock()
proc.wait = AsyncMock(return_value=None)
async def _communicate():
return b"PASS\n", b""
proc.communicate = _communicate
return proc
async def _create_and_run_all(tmp_path, tmp_docs):
    """Create run, execute all auto steps (http_check + script via mocks), return run_id.

    Flow: point SOFIIA_DATA_DIR at tmp_path, rebuild the docs index over
    tmp_docs, create a run for the rehearsal checklist, then drive next_step()
    for every step with HTTP GET and subprocess creation patched out; manual
    steps are completed explicitly with status "ok".
    """
    import app.docs_index as docs_index_mod
    import app.runbook_runner as runner_mod
    # NOTE(review): db_mod appears unused below — possibly imported for its
    # import-time side effects; confirm before removing.
    import app.db as db_mod
    import os
    os.environ["SOFIIA_DATA_DIR"] = str(tmp_path / "sofiia-data")
    await docs_index_mod.rebuild_index(tmp_docs)
    out = await runner_mod.create_run(
        "runbook/rehearsal-v1-30min-checklist.md",
        operator_id="test-op",
        node_id="NODA2",
        sofiia_url="http://127.0.0.1:8002",
    )
    run_id = out["run_id"]
    steps_total = out["steps_total"]
    # Execute all auto steps (http_check + script)
    with patch("httpx.AsyncClient.get", new_callable=AsyncMock) as mock_http:
        async def fake_get(url, **kwargs):
            # /api/audit is expected to be auth-protected (401); everything else 200.
            if "audit" in str(url):
                return type("R", (), {"status_code": 401})()
            return type("R", (), {"status_code": 200})()
        mock_http.side_effect = fake_get
        with patch("asyncio.create_subprocess_exec", new_callable=AsyncMock) as mock_exec:
            mock_exec.return_value = _fake_http_proc()
            for _ in range(steps_total):
                step = await runner_mod.next_step(run_id, operator_id="test-op")
                if step is None:
                    break
                if step.get("type") == "manual":
                    await runner_mod.complete_step(run_id, step["step_index"], status="ok", notes="done")
    return run_id
# ── Unit tests: renderer ──────────────────────────────────────────────────────
def test_render_release_evidence_file_created(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """The renderer writes release_evidence.md and reports its path and size."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_release_evidence(run_id))
    assert "evidence_path" in result
    assert "bytes" in result
    assert result["bytes"] > 0
    evidence_file = Path(result["evidence_path"])
    assert evidence_file.exists(), f"Expected file at {evidence_file}"
def test_render_release_evidence_contains_key_sections(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """Evidence document mentions metadata, preflight, smoke, sign-off, and the run id."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_release_evidence(run_id))
    text = Path(result["evidence_path"]).read_text(encoding="utf-8")
    for expected in ("Release metadata", "Preflight", "Smoke", "Sign-off", run_id):
        assert expected in text
def test_render_release_evidence_includes_step_table(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """The evidence file carries a steps summary table mentioning at least one action type."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_release_evidence(run_id))
    text = Path(result["evidence_path"]).read_text(encoding="utf-8")
    assert any(kind in text for kind in ("http_check", "script", "manual"))
    assert "All steps summary" in text
def test_render_post_review_file_created(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """render_post_review writes post_review.md and reports a nonzero size."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_post_review(run_id))
    assert "path" in result
    review_file = Path(result["path"])
    assert review_file.exists()
    assert result["bytes"] > 0
def test_render_post_review_has_todo_markers(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """The review keeps [TODO] placeholders and the expected section headings."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_post_review(run_id))
    text = Path(result["path"]).read_text(encoding="utf-8")
    for expected in ("[TODO]", "Release Metadata", "Preflight Outcome", "Smoke Results", "Action Items"):
        assert expected in text
def test_render_post_review_autofills_operator(sofiia_module, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """Operator id and run id from the DB appear verbatim in the review body."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    result = event_loop.run_until_complete(art_mod.render_post_review(run_id))
    text = Path(result["path"]).read_text(encoding="utf-8")
    assert "test-op" in text
    assert run_id in text
def test_render_evidence_404_for_unknown_run(sofiia_module, tmp_path, monkeypatch):
    """An unknown run_id makes render_release_evidence raise ValueError('Run not found...')."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    import app.db as _db
    event_loop = asyncio.get_event_loop()

    async def _attempt():
        await _db.init_db()
        return await art_mod.render_release_evidence("00000000-0000-0000-0000-000000000000")

    with pytest.raises(ValueError, match="Run not found"):
        event_loop.run_until_complete(_attempt())
# ── API endpoint tests ────────────────────────────────────────────────────────
def test_evidence_endpoint_404_for_unknown_run(sofiia_client):
    """POST .../evidence answers 404 when the run does not exist."""
    resp = sofiia_client.post("/api/runbooks/runs/00000000-0000-0000-0000-000000000000/evidence")
    assert resp.status_code == 404
def test_post_review_endpoint_404_for_unknown_run(sofiia_client):
    """POST .../post_review answers 404 when the run does not exist."""
    resp = sofiia_client.post("/api/runbooks/runs/00000000-0000-0000-0000-000000000000/post_review")
    assert resp.status_code == 404
def test_evidence_endpoint_success(sofiia_module, sofiia_client, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """POST .../evidence answers 200 with evidence_path (containing run_id) and nonzero bytes."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    resp = sofiia_client.post(f"/api/runbooks/runs/{run_id}/evidence")
    assert resp.status_code == 200, resp.text
    payload = resp.json()
    assert "evidence_path" in payload
    assert payload.get("bytes", 0) > 0
    assert run_id in payload.get("evidence_path", "")
def test_post_review_endpoint_success(sofiia_module, sofiia_client, tmp_path, tmp_rehearsal_docs, monkeypatch):
    """POST .../post_review answers 200 with a path and nonzero bytes."""
    monkeypatch.setenv("SOFIIA_DATA_DIR", str(tmp_path / "sofiia-data"))
    import app.runbook_artifacts as art_mod
    event_loop = asyncio.get_event_loop()
    run_id = event_loop.run_until_complete(_create_and_run_all(tmp_path, tmp_rehearsal_docs))
    resp = sofiia_client.post(f"/api/runbooks/runs/{run_id}/post_review")
    assert resp.status_code == 200, resp.text
    payload = resp.json()
    assert "path" in payload
    assert payload.get("bytes", 0) > 0