Includes updates across gateway, router, node-worker, memory-service, aurora-service, swapper, sofiia-console UI and node2 infrastructure: - gateway-bot: Dockerfile, http_api.py, druid/aistalk prompts, doc_service - services/router: main.py, router-config.yml, fabric_metrics, memory_retrieval, offload_client, prompt_builder - services/node-worker: worker.py, main.py, config.py, fabric_metrics - services/memory-service: Dockerfile, database.py, main.py, requirements - services/aurora-service: main.py (+399), kling.py, quality_report.py - services/swapper-service: main.py, swapper_config_node2.yaml - services/sofiia-console: static/index.html (console UI update) - config: agent_registry, crewai_agents/teams, router_agents - ops/fabric_preflight.sh: updated preflight checks - router-config.yml, docker-compose.node2.yml: infra updates - docs: NODA1-AGENT-ARCHITECTURE, fabric_contract updated Made-with: Cursor
136 lines · 4.3 KiB · Python
"""Node Worker — NATS offload executor for cross-node inference."""
|
|
import logging
|
|
import os
|
|
|
|
from fastapi import FastAPI
|
|
|
|
import config
|
|
import worker
|
|
|
|
logging.basicConfig(level=logging.INFO)
|
|
logger = logging.getLogger("node-worker")
|
|
|
|
app = FastAPI(title="Node Worker", version="1.0.0")
|
|
|
|
_nats_client = None
|
|
|
|
|
|
@app.get("/healthz")
|
|
async def healthz():
|
|
connected = _nats_client is not None and _nats_client.is_connected if _nats_client else False
|
|
return {
|
|
"status": "ok" if connected else "degraded",
|
|
"node_id": config.NODE_ID,
|
|
"nats_connected": connected,
|
|
"max_concurrency": config.MAX_CONCURRENCY,
|
|
}
|
|
|
|
|
|
@app.get("/metrics")
|
|
async def metrics():
|
|
return worker.get_metrics()
|
|
|
|
|
|
@app.get("/prom_metrics")
|
|
async def prom_metrics():
|
|
from fastapi.responses import Response
|
|
import fabric_metrics as fm
|
|
data = fm.get_metrics_text()
|
|
if data:
|
|
return Response(content=data, media_type="text/plain; charset=utf-8")
|
|
return {"error": "prometheus_client not installed"}
|
|
|
|
|
|
@app.get("/caps")
|
|
async def caps():
|
|
"""Capability flags for NCS to aggregate.
|
|
|
|
Semantic vs operational separation (contract):
|
|
- capabilities.voice_* = semantic availability (provider configured).
|
|
True as long as the provider is configured, regardless of NATS state.
|
|
Routing decisions are based on this.
|
|
- runtime.nats_subscriptions.voice_* = operational (NATS sub active).
|
|
Used for health/telemetry only — NOT for routing.
|
|
|
|
This prevents false-negatives during reconnects / restart races.
|
|
"""
|
|
import worker as _w
|
|
nid = config.NODE_ID.lower()
|
|
|
|
# Semantic: provider configured → capability is available
|
|
voice_tts_cap = config.TTS_PROVIDER != "none"
|
|
voice_stt_cap = config.STT_PROVIDER != "none"
|
|
voice_llm_cap = True # LLM always available when node-worker is up
|
|
|
|
# Operational: actual NATS subscription state (health/telemetry only)
|
|
nats_voice_tts_active = f"node.{nid}.voice.tts.request" in _w._VOICE_SUBJECTS
|
|
nats_voice_stt_active = f"node.{nid}.voice.stt.request" in _w._VOICE_SUBJECTS
|
|
nats_voice_llm_active = f"node.{nid}.voice.llm.request" in _w._VOICE_SUBJECTS
|
|
|
|
return {
|
|
"node_id": config.NODE_ID,
|
|
"capabilities": {
|
|
"llm": True,
|
|
"vision": True,
|
|
"stt": config.STT_PROVIDER != "none",
|
|
"tts": config.TTS_PROVIDER != "none",
|
|
"ocr": config.OCR_PROVIDER != "none",
|
|
"image": config.IMAGE_PROVIDER != "none",
|
|
# Voice HA semantic capability flags (provider-based, not NATS-based)
|
|
"voice_tts": voice_tts_cap,
|
|
"voice_llm": voice_llm_cap,
|
|
"voice_stt": voice_stt_cap,
|
|
},
|
|
"providers": {
|
|
"stt": config.STT_PROVIDER,
|
|
"tts": config.TTS_PROVIDER,
|
|
"ocr": config.OCR_PROVIDER,
|
|
"image": config.IMAGE_PROVIDER,
|
|
},
|
|
"defaults": {
|
|
"llm": config.DEFAULT_LLM,
|
|
"vision": config.DEFAULT_VISION,
|
|
},
|
|
"concurrency": config.MAX_CONCURRENCY,
|
|
"voice_concurrency": {
|
|
"voice_tts": config.VOICE_MAX_CONCURRENT_TTS,
|
|
"voice_llm": config.VOICE_MAX_CONCURRENT_LLM,
|
|
"voice_stt": config.VOICE_MAX_CONCURRENT_STT,
|
|
},
|
|
# Operational NATS subscription state — for health/monitoring only
|
|
"runtime": {
|
|
"nats_subscriptions": {
|
|
"voice_tts_active": nats_voice_tts_active,
|
|
"voice_stt_active": nats_voice_stt_active,
|
|
"voice_llm_active": nats_voice_llm_active,
|
|
}
|
|
},
|
|
}
|
|
|
|
|
|
@app.on_event("startup")
|
|
async def startup():
|
|
global _nats_client
|
|
try:
|
|
import nats as nats_lib
|
|
_nats_client = await nats_lib.connect(config.NATS_URL)
|
|
logger.info(f"✅ NATS connected: {config.NATS_URL}")
|
|
await worker.start(_nats_client)
|
|
logger.info(f"✅ Node Worker ready: node={config.NODE_ID} concurrency={config.MAX_CONCURRENCY}")
|
|
except Exception as e:
|
|
logger.error(f"❌ Startup failed: {e}")
|
|
|
|
|
|
@app.on_event("shutdown")
|
|
async def shutdown():
|
|
if _nats_client:
|
|
try:
|
|
await _nats_client.close()
|
|
except Exception:
|
|
pass
|
|
|
|
|
|
if __name__ == "__main__":
|
|
import uvicorn
|
|
uvicorn.run(app, host="0.0.0.0", port=config.PORT)
|