NCS (services/node-capabilities/metrics.py): - NodeLoad: inflight_jobs, queue_depth, concurrency_limit, estimated_wait_ms, cpu_load_1m, mem_pressure (macOS + Linux), rtt_ms_to_hub - RuntimeLoad: per-runtime healthy, p50_ms, p95_ms from rolling 50-sample window - POST /capabilities/report_latency for node-worker → NCS reporting - NCS fetches worker metrics via NODE_WORKER_URL Node Worker: - GET /metrics endpoint (inflight, concurrency, latency buffers) - Latency tracking per job type (llm/vision) with rolling buffer - Fire-and-forget latency reporting to NCS after each successful job Router (model_select v3): - score_candidate(): wait + model_latency + cross_node_penalty + prefer_bonus - LOCAL_THRESHOLD_MS=250: prefer local if within threshold of remote - ModelSelection.score field for observability - Structured [score] logs with chosen node, model, and score breakdown Tests: 19 new (12 scoring + 7 NCS metrics), 36 total pass Docs: ops/runbook_p3_1.md, ops/CHANGELOG_FABRIC.md No breaking changes to JobRequest/JobResponse or capabilities schema. Made-with: Cursor
70 lines
2.1 KiB
Python
"""Tests for NCS metrics module."""
|
|
import sys
|
|
import os
|
|
import asyncio
|
|
|
|
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "services", "node-capabilities"))
|
|
|
|
from metrics import (
|
|
record_latency, get_latency_stats, get_cpu_load, get_mem_pressure,
|
|
build_node_load, build_runtime_load, _latency_buffer,
|
|
)
|
|
|
|
|
|
def setup_function():
    """pytest hook: wipe the shared rolling latency buffer before each test.

    Keeps tests independent — samples recorded by one test must not leak
    into another's p50/p95 calculations.
    """
    _latency_buffer.clear()
|
|
|
|
|
|
def test_record_and_get_latency():
    """Recorded samples show up in the stats with correct p50/p95."""
    for duration_ms in (500, 300, 700):
        record_latency("ollama", "llm", duration_ms)

    stats = get_latency_stats("ollama", "llm")

    assert stats["samples"] == 3
    assert stats["p50_ms"] == 500
    assert stats["p95_ms"] == 700
|
|
|
|
|
|
def test_empty_latency_stats():
    """An unknown runtime/job-type pair yields empty stats, not an error."""
    stats = get_latency_stats("nonexistent", "llm")

    assert stats["samples"] == 0
    assert stats["p50_ms"] is None
|
|
|
|
|
|
def test_cpu_load_returns_float_or_none():
    """CPU load is a float where available, otherwise None (unsupported OS)."""
    load = get_cpu_load()
    assert isinstance(load, float) or load is None
|
|
|
|
|
|
def test_mem_pressure_returns_valid_or_none():
    """Memory pressure is one of the known levels, or None if unreadable."""
    known_levels = ("low", "medium", "high", "critical")
    pressure = get_mem_pressure()
    assert pressure is None or pressure in known_levels
|
|
|
|
|
|
def test_build_node_load_defaults():
    """An idle worker produces zero estimated wait and echoes its limits."""
    idle_metrics = {
        "inflight_jobs": 0,
        "concurrency_limit": 2,
        "queue_depth": 0,
    }
    load = asyncio.run(build_node_load(worker_metrics=idle_metrics))

    assert load["inflight_jobs"] == 0
    assert load["estimated_wait_ms"] == 0
    assert load["concurrency_limit"] == 2
    # Every snapshot carries a timestamp for staleness checks.
    assert "ts" in load
|
|
|
|
|
|
def test_build_node_load_wait_when_busy():
    """When inflight exceeds the limit, wait is estimated from recorded p50."""
    record_latency("ollama", "llm", 1000)
    busy_metrics = {
        "inflight_jobs": 5,
        "concurrency_limit": 2,
        "queue_depth": 0,
    }
    load = asyncio.run(build_node_load(worker_metrics=busy_metrics))

    # 4 queued slots ahead x p50 of 1000ms — matches build_node_load's formula.
    assert load["estimated_wait_ms"] == 4 * 1000
|
|
|
|
|
|
def test_build_runtime_load():
    """Per-runtime health is derived from each runtime's status string."""
    statuses = {
        "ollama": {"status": "ok"},
        "swapper": {"status": "error: timeout"},
    }
    runtime_loads = asyncio.run(build_runtime_load(statuses))

    assert len(runtime_loads) == 2

    by_runtime = {entry["runtime"]: entry for entry in runtime_loads}
    assert by_runtime["ollama"]["healthy"] is True
    assert by_runtime["swapper"]["healthy"] is False
|