Deliverables:
- ops/audit_node2_20260227.md: readable report (hardware, containers, models, Sofiia, findings)
- ops/audit_node2_20260227.json: structured machine-readable inventory
- ops/audit_node2_findings.yml: 10 PASS + 5 PARTIAL + 3 FAIL + 3 SECURITY gaps
- ops/node2_capabilities.yml: router-ready capabilities (vision/text/code/stt/tts models)

Key findings:
- P0: vision pipeline broken (/vision/models=empty, qwen3-vl:8b not installed)
- P1: node-ops-worker missing; SSH root password in sofiia-console env
- P1: router-config.yml uses 172.17.0.1 (Linux bridge), not host.docker.internal

Made-with: Cursor
130 lines · 7.0 KiB · JSON
{
  "node_id": "noda2",
  "hostname": "MacBook-Pro.local",
  "timestamp": "2026-02-27T08:00:00Z",
  "hardware": {
    "cpu": "Apple M4 Max",
    "ram_gb": 64,
    "storage_free_gb": 634,
    "os": "macOS 26.3 (Darwin arm64)"
  },
  "backends": [
    {
      "name": "ollama-main",
      "base_url": "http://localhost:11434",
      "pid_owner": "ollama (system daemon)",
      "version": "0.17.1",
      "gpu_mode": "Apple Silicon (MPS/Metal, unified memory)",
      "models": [
        {"name": "qwen3.5:35b-a3b", "type": "llm", "size_gb": 9.3, "family": "qwen3", "params": "14.8B (MoE)", "modified": "19h ago"},
        {"name": "qwen3:14b", "type": "llm", "size_gb": 9.3, "family": "qwen3", "params": "14B", "modified": "20h ago"},
        {"name": "gemma3:latest", "type": "llm", "size_gb": 3.3, "family": "gemma3", "params": "~4B", "modified": "25h ago"},
        {"name": "glm-4.7-flash:32k", "type": "llm", "size_gb": 19.0, "family": "glm", "params": "32B", "modified": "2w ago"},
        {"name": "glm-4.7-flash:q4_K_M", "type": "llm", "size_gb": 19.0, "family": "glm", "params": "32B", "modified": "2w ago"},
        {"name": "llava:13b", "type": "vision", "size_gb": 8.0, "family": "llava+clip", "params": "13B", "modified": "3mo ago", "vision_capable": true},
        {"name": "mistral-nemo:12b", "type": "llm", "size_gb": 7.1, "family": "mistral", "params": "12B", "modified": "3mo ago"},
        {"name": "deepseek-coder:33b", "type": "code", "size_gb": 18.0, "family": "deepseek", "params": "33B", "modified": "3mo ago"},
        {"name": "deepseek-r1:70b", "type": "llm", "size_gb": 42.0, "family": "deepseek", "params": "70B", "modified": "3mo ago"},
        {"name": "starcoder2:3b", "type": "code", "size_gb": 1.7, "family": "starcoder", "params": "3B", "modified": "3mo ago"},
        {"name": "phi3:latest", "type": "llm", "size_gb": 2.2, "family": "phi3", "params": "~4B", "modified": "3mo ago"},
        {"name": "gpt-oss:latest", "type": "llm", "size_gb": 13.0, "family": "unknown", "params": "~13B", "modified": "3mo ago"}
      ],
      "running": [],
      "notes": "llava:13b is only vision-capable model in Ollama (CLIP multimodal). qwen3-vl NOT installed."
    },
    {
      "name": "llama-server-cpu",
      "base_url": "http://localhost:11435",
      "pid_owner": "llama-server process (user: apple)",
      "binary": "llama-server",
      "model_path": "/Users/apple/Library/Application Support/llama.cpp/models/Qwen3.5-35B-A3B-Q4_K_M.gguf",
      "model_name": "Qwen3.5-35B-A3B (Q4_K_M, llama.cpp)",
      "api_type": "OpenAI-compatible (/v1/models, /v1/chat/completions)",
      "health_url": "http://localhost:11435/health",
      "health_status": "ok",
      "gpu_mode": "Apple Silicon (Metal via llama.cpp)",
      "notes": "Separate llama.cpp server instance running same Qwen3.5-35B-A3B model. Duplicates Ollama coverage."
    },
    {
      "name": "swapper-service",
      "base_url": "http://localhost:8890",
      "container": "swapper-service-node2",
      "health_url": "http://localhost:8890/health",
      "health_status": "healthy",
      "active_model": "qwen3-14b",
      "mode": "single-active",
      "ollama_base_url": "http://host.docker.internal:11434",
      "endpoints": {
        "/models": "200",
        "/vision/models": "200 (empty list - no vision models configured!)",
        "/stt/models": "200",
        "/tts/models": "200",
        "/ocr": "405 (method)"
      },
      "swapper_models_configured": [
        "gpt-oss:latest", "phi3:latest", "qwen3:14b (loaded)",
        "qwen3.5:35b-a3b", "glm-4.7-flash:32k", "gemma2:27b (not installed)",
        "deepseek-coder:33b", "qwen2.5-coder:32b (not installed)", "deepseek-r1:70b"
      ],
      "gap": "vision/models returns empty - llava:13b not in swapper_config_node2.yaml"
    }
  ],
  "containers": [
    {"name": "dagi-router-node2", "port": "9102->8000", "status": "healthy", "nats_connected": true, "node_id": "NODA2"},
    {"name": "dagi-gateway-node2", "port": "9300", "status": "healthy", "agents": 14},
    {"name": "dagi-nats-node2", "port": "4222,8222", "status": "running", "leafnode": "spoke->144.76.224.179:7422", "rtt_ms": 58},
    {"name": "dagi-memory-service-node2", "port": "8000", "status": "healthy", "collections": 6},
    {"name": "dagi-qdrant-node2", "port": "6333-6334", "status": "healthy"},
    {"name": "swapper-service-node2", "port": "8890", "status": "healthy"},
    {"name": "dagi-postgres-node2", "port": "5433->5432", "status": "healthy"},
    {"name": "dagi-neo4j-node2", "port": "7474,7687", "status": "healthy"},
    {"name": "sofiia-console", "port": "8002", "status": "running (Python process, not Docker healthcheck)"},
    {"name": "open-webui", "port": "8080", "status": "healthy", "version": "0.7.2"}
  ],
  "non_docker_services": [
    {"name": "ollama", "port": 11434, "type": "system daemon", "binary": "ollama"},
    {"name": "llama-server", "port": 11435, "type": "user process", "model": "Qwen3.5-35B-A3B-Q4_K_M.gguf"},
    {"name": "gitea", "port": 3000, "type": "git server", "version": "1.25.3"},
    {"name": "spacebot", "port": 19898, "type": "Telegram bot", "config": "uses sofiia-console BFF"},
    {"name": "opencode", "port": 3456, "type": "AI coding tool", "note": "OpenCode.app"},
    {"name": "stable (Warp)", "port": 9277, "type": "terminal helper", "note": "Warp.app stable process, not SD"}
  ],
  "sofiia_agent": {
    "agent_id": "sofiia",
    "display_name": "Sophia",
    "class": "top_level",
    "canonical_role": "Chief AI Architect & Technical Sovereign",
    "telegram": "@SofiiaDAARION_bot",
    "prompt_file": "gateway-bot/sofiia_prompt.txt",
    "prompt_lines": 1579,
    "llm_profile": "NODA2 router -> Ollama 11434",
    "gateway": "dagi-gateway-node2:9300",
    "control_plane": {
      "console_ui": "http://localhost:8002 (sofiia-console)",
      "spacebot": "spacebot process -> sofiia-console BFF (http://localhost:8002/api)",
      "nats_subjects": "not yet configured for node-ops",
      "ssh_access": "NODES_NODA1_SSH_PASSWORD in env (present, SECURITY RISK)",
      "node_ops_worker": "NOT IMPLEMENTED"
    }
  },
  "nats_leafnode": {
    "noda2_role": "spoke",
    "noda1_hub": "144.76.224.179:7422",
    "rtt_ms": 58,
    "connection_status": "connected",
    "cross_node_subjects_tested": ["node.test.hello"],
    "cross_node_pubsub": "PASS"
  },
  "qdrant_collections": {
    "sofiia_messages": {"points": 2},
    "sofiia_docs": {"points": 0},
    "sofiia_memory_items": {"points": 0},
    "sofiia_user_context": {"points": 0},
    "memories": {"points": 0},
    "messages": {"points": 0}
  },
  "recommended_default_vision_model": "llava:13b (Ollama, port 11434) — only available, but outdated. Install qwen3-vl:8b for better quality.",
  "recommended_default_text_model": "qwen3.5:35b-a3b (Ollama, port 11434) — fastest large model via MoE architecture",
  "recommended_default_code_model": "deepseek-coder:33b (Ollama) or qwen3.5:35b-a3b"
}