## Agents Added

- Alateya: R&D, biotech, innovations
- Clan (Spirit): Community spirit agent
- Eonarch: Consciousness evolution agent

## Changes

- docker-compose.node1.yml: Added tokens for all 3 new agents
- gateway-bot/http_api.py: Added configs and webhook endpoints
- gateway-bot/clan_prompt.txt: New prompt file
- gateway-bot/eonarch_prompt.txt: New prompt file

## Fixes

- Fixed ROUTER_URL from :9102 to :8000 (internal container port)
- All 9 Telegram agents now working

## Documentation

- Created PROJECT-MASTER-INDEX.md — single entry point
- Added various status documents and scripts

Tokens configured:

- Helion, NUTRA, Agromatrix (existing)
- Alateya, Clan, Eonarch (new)
- Druid, GreenFood, DAARWIZZ (configured)
89 lines · 2.9 KiB · Python
"""
|
|
DAGI Router Client
|
|
Sends requests to DAGI Router from Bot Gateway
|
|
"""
|
|
import logging
|
|
import os
|
|
import httpx
|
|
from typing import Dict, Any
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Router configuration from environment
|
|
ROUTER_BASE_URL = os.getenv("ROUTER_URL", "http://127.0.0.1:9102")
|
|
# Increased timeout for image generation + LLM calls (FLUX takes ~17s, LLM can take 30-60s)
|
|
ROUTER_TIMEOUT = float(os.getenv("ROUTER_TIMEOUT", "180.0"))
|
|
|
|
|
|
async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
    """
    Send a request to the DAGI Router and normalize its response.

    Args:
        body: Request payload with mode, message, agent, metadata, context, etc.

    Returns:
        Router response converted to the Gateway's expected shape:
        {"ok", "data": {"text", "image_base64"}, "response", "model",
        "backend", "image_base64"}.

    Raises:
        httpx.HTTPError: if the router request fails (connection error or
            a non-2xx status from ``raise_for_status``).
    """
    agent_id = body.get("agent", "devtools")
    message = body.get("message", "")
    metadata = body.get("metadata", {})
    context = body.get("context", {})

    # system_prompt may arrive at body level or nested in context.
    system_prompt = body.get("system_prompt") or context.get("system_prompt")
    if system_prompt:
        # Lazy %-style args: formatting is skipped when INFO is disabled.
        logger.info(
            "Using system prompt (%d chars) for agent %s",
            len(system_prompt), agent_id,
        )

    # Build the infer request URL for this agent.
    infer_url = f"{ROUTER_BASE_URL}/v1/agents/{agent_id}/infer"

    # Ensure agent_id is in metadata for memory storage.
    # NOTE(review): this mutates the caller's metadata dict in place —
    # confirm callers do not rely on it staying unchanged.
    metadata["agent_id"] = agent_id

    infer_body = {
        "prompt": message,
        "system_prompt": system_prompt,
        "metadata": metadata,
    }

    # Pass images through if the context carries any.
    images = context.get("images", [])
    if images:
        infer_body["images"] = images
        logger.info("Including %d image(s) in request", len(images))

    # Pass provider override if specified in metadata.
    if metadata.get("provider"):
        infer_body["provider_override"] = metadata["provider"]

    logger.info(
        "Sending to Router (%s): agent=%s, provider=%s, has_images=%s",
        infer_url, agent_id, metadata.get("provider", "default"), bool(images),
    )

    try:
        async with httpx.AsyncClient(timeout=ROUTER_TIMEOUT) as client:
            response = await client.post(infer_url, json=infer_body)
            response.raise_for_status()

            result = response.json()

        # Compute the reply text once (was looked up twice identically).
        text = result.get("response", result.get("text", ""))
        image_base64 = result.get("image_base64")  # generated image, if any

        # Convert Router response to the Gateway's expected format.
        return {
            "ok": True,
            "data": {
                "text": text,
                "image_base64": image_base64,
            },
            "response": text,
            "model": result.get("model"),
            "backend": result.get("backend"),
            # Duplicated at top level for easy access by callers.
            "image_base64": image_base64,
        }

    except httpx.HTTPError as e:
        logger.error("Router request failed: %s", e)
        raise
|