feat: Add Alateya, Clan, Eonarch agents + fix gateway-router connection

## Agents Added
- Alateya: R&D, biotech, innovations
- Clan (Spirit): Community spirit agent
- Eonarch: Consciousness evolution agent

## Changes
- docker-compose.node1.yml: Added tokens for all 3 new agents
- gateway-bot/http_api.py: Added configs and webhook endpoints
- gateway-bot/clan_prompt.txt: New prompt file
- gateway-bot/eonarch_prompt.txt: New prompt file

## Fixes
- Fixed ROUTER_URL from :9102 to :8000 (internal container port)
- All 9 Telegram agents now working

## Documentation
- Created PROJECT-MASTER-INDEX.md - single entry point
- Added various status documents and scripts

Tokens configured:
- Helion, NUTRA, Agromatrix (existing)
- Alateya, Clan, Eonarch (new)
- Druid, GreenFood, DAARWIZZ (configured)
Committed by Apple on 2026-01-28 06:40:34 -08:00
parent 4aeb69e7ae
commit 0c8bef82f4
120 changed files with 21905 additions and 425 deletions

View File

@@ -11,7 +11,7 @@ logger = logging.getLogger(__name__)
MEMORY_SERVICE_URL = os.getenv("MEMORY_SERVICE_URL", "http://memory-service:8000")
CONTEXT_CACHE_TTL = float(os.getenv("MEMORY_CONTEXT_CACHE_TTL", "5"))
LOCAL_CONTEXT_MAX_MESSAGES = int(os.getenv("LOCAL_CONTEXT_MAX_MESSAGES", "20"))
LOCAL_CONTEXT_MAX_MESSAGES = int(os.getenv("LOCAL_CONTEXT_MAX_MESSAGES", "50"))
# =====================================
# LOCAL CONTEXT STORE (fallback when Memory Service unavailable)
@@ -34,7 +34,7 @@ class LocalContextStore:
"timestamp": datetime.now().isoformat()
})
def get_context(self, chat_id: str, limit: int = 10) -> List[Dict[str, Any]]:
def get_context(self, chat_id: str, limit: int = 30) -> List[Dict[str, Any]]:
"""Отримати останні повідомлення для контексту"""
if chat_id not in self._store:
return []
@@ -46,14 +46,14 @@ class LocalContextStore:
if chat_id in self._store:
del self._store[chat_id]
def format_for_prompt(self, chat_id: str, limit: int = 10) -> str:
def format_for_prompt(self, chat_id: str, limit: int = 30) -> str:
"""Форматувати контекст для system prompt"""
messages = self.get_context(chat_id, limit)
if not messages:
return ""
lines = []
for msg in messages:
role = "User" if msg["role"] == "user" else "Helion"
role = "User" if msg["role"] == "user" else "Assistant"
lines.append(f"{role}: {msg['text']}")
return "\n".join(lines)
@@ -98,8 +98,56 @@ class MemoryClient:
if cached and now - cached[0] < CONTEXT_CACHE_TTL:
return cached[1]
# FALLBACK: Використовуємо локальний контекст
# (Memory Service API не сумісний - тимчасове рішення)
# Спроба отримати контекст із Memory Service
try:
async with httpx.AsyncClient(timeout=self.timeout) as client:
params = {
"user_id": user_id,
"channel_id": channel_id,
"limit": limit,
}
resp = await client.get(
f"{self.base_url}/agents/{agent_id}/memory",
params=params,
headers={"Authorization": f"Bearer {user_id}"},
)
if resp.status_code == 200:
data = resp.json()
events = data.get("events", [])
# Сортуємо за timestamp, якщо є
events = sorted(
events,
key=lambda e: e.get("timestamp", ""),
)
recent_events = [
{
"body_text": e.get("content", ""),
"kind": e.get("kind", "message"),
"type": "user" if e.get("role") == "user" else "agent",
}
for e in events
if e.get("content")
]
# Формуємо контекст для prompt
lines = []
for e in events:
content = e.get("content", "")
if not content:
continue
role = "User" if e.get("role") == "user" else "Assistant"
lines.append(f"{role}: {content}")
result = {
"facts": [],
"recent_events": recent_events,
"dialog_summaries": [],
"local_context_text": "\n".join(lines[-limit:]),
}
self._context_cache[cache_key] = (now, result)
return result
except Exception as e:
logger.debug(f"Memory Service context fetch failed, using local: {e}")
# FALLBACK: локальний контекст (in-memory)
local_messages = local_context.get_context(str(channel_id or user_id), limit)
local_events = [
{"body_text": msg["text"], "kind": "message", "type": "user" if msg["role"] == "user" else "agent"}
@@ -110,7 +158,7 @@ class MemoryClient:
"facts": [],
"recent_events": local_events,
"dialog_summaries": [],
"local_context_text": local_context.format_for_prompt(str(channel_id or user_id), limit)
"local_context_text": local_context.format_for_prompt(str(channel_id or user_id), limit),
}
self._context_cache[cache_key] = (now, result)
return result