node1: add universal file tool, gateway document delivery, and sync runbook

This commit is contained in:
Apple
2026-02-15 01:50:37 -08:00
parent dd4b466d79
commit 21576f0ca3
7 changed files with 2207 additions and 131 deletions

View File

@@ -57,6 +57,172 @@ LAST_PENDING_STATE: Dict[str, Dict[str, Any]] = {}
# TTL for entries in LAST_PENDING_STATE (seconds).
PENDING_STATE_TTL = 1800 # 30 minutes
# Per-user language preference cache (chat_id:user_id -> {lang, ts})
USER_LANGUAGE_PREFS: Dict[str, Dict[str, Any]] = {}
USER_LANGUAGE_PREF_TTL = 30 * 24 * 3600 # 30 days
# Recent photo context for follow-up questions in chat (agent:chat:user -> {file_id, ts})
RECENT_PHOTO_CONTEXT: Dict[str, Dict[str, Any]] = {}
RECENT_PHOTO_TTL = 30 * 60 # 30 minutes
def _cleanup_recent_photo_context() -> None:
    """Evict cached photo-context entries older than RECENT_PHOTO_TTL."""
    cutoff = time.time() - RECENT_PHOTO_TTL
    stale_keys = [key for key, rec in RECENT_PHOTO_CONTEXT.items() if float(rec.get("ts", 0)) < cutoff]
    for key in stale_keys:
        RECENT_PHOTO_CONTEXT.pop(key, None)
def _set_recent_photo_context(agent_id: str, chat_id: str, user_id: str, file_id: str) -> None:
    """Remember the last photo a user sent so follow-up questions can reuse it."""
    _cleanup_recent_photo_context()
    RECENT_PHOTO_CONTEXT[f"{agent_id}:{chat_id}:{user_id}"] = {
        "file_id": file_id,
        "ts": time.time(),
    }
def _get_recent_photo_file_id(agent_id: str, chat_id: str, user_id: str) -> Optional[str]:
    """Return the cached photo file_id for (agent, chat, user), or None if absent/expired."""
    _cleanup_recent_photo_context()
    rec = RECENT_PHOTO_CONTEXT.get(f"{agent_id}:{chat_id}:{user_id}")
    return rec.get("file_id") if rec else None
def _looks_like_photo_followup(text: str) -> bool:
if not text:
return False
t = text.strip().lower()
markers = [
"що ти бачиш", "що на фото", "що на зображенні", "опиши фото", "подивись фото",
"what do you see", "what is in the image", "describe the photo",
"что ты видишь", "что на фото", "опиши фото", "посмотри фото",
]
return any(m in t for m in markers)
def _cleanup_user_language_prefs() -> None:
    """Evict language-preference cache entries older than USER_LANGUAGE_PREF_TTL."""
    cutoff = time.time() - USER_LANGUAGE_PREF_TTL
    for key in [k for k, rec in USER_LANGUAGE_PREFS.items() if float(rec.get("ts", 0)) < cutoff]:
        del USER_LANGUAGE_PREFS[key]
def _normalize_lang_code(raw: Optional[str]) -> Optional[str]:
if not raw:
return None
code = str(raw).strip().lower().replace("_", "-")
if code.startswith("uk"):
return "uk"
if code.startswith("ru"):
return "ru"
if code.startswith("en"):
return "en"
return None
def _detect_language_from_text(text: str) -> Optional[str]:
if not text:
return None
t = text.lower()
letters = [ch for ch in t if ch.isalpha()]
if not letters:
return None
cyr = sum(1 for ch in letters if "а" <= ch <= "я" or ch in "іїєґё")
lat = sum(1 for ch in letters if "a" <= ch <= "z")
if cyr >= 3 and cyr >= lat:
# Ukrainian-specific letters strongly indicate Ukrainian.
if any(ch in t for ch in "іїєґ"):
return "uk"
# Russian-specific letters/symbols.
if any(ch in t for ch in "ёыэъ"):
return "ru"
# Soft lexical preference.
uk_hits = sum(1 for w in ("що", "який", "дякую", "будь", "будь ласка", "привіт") if w in t)
ru_hits = sum(1 for w in ("что", "какой", "спасибо", "пожалуйста", "привет") if w in t)
if uk_hits > ru_hits:
return "uk"
if ru_hits > uk_hits:
return "ru"
return "uk"
if lat >= 3 and lat > cyr:
return "en"
return None
def resolve_preferred_language(chat_id: str, user_id: str, text: str, telegram_lang_code: Optional[str]) -> str:
    """Pick the reply language (text heuristic > Telegram code > cache > 'uk') and cache it."""
    _cleanup_user_language_prefs()
    cache_key = f"{chat_id}:{user_id}"
    preferred = (
        _detect_language_from_text(text)
        or _normalize_lang_code(telegram_lang_code)
        or USER_LANGUAGE_PREFS.get(cache_key, {}).get("lang")
        or "uk"
    )
    USER_LANGUAGE_PREFS[cache_key] = {"lang": preferred, "ts": time.time()}
    return preferred
def preferred_language_label(lang: str) -> str:
    """Human-readable English name for a normalized language code; defaults to Ukrainian."""
    labels = {"uk": "Ukrainian", "ru": "Russian", "en": "English"}
    return labels.get((lang or "").lower(), "Ukrainian")
def _extract_preferred_language_from_profile_fact(fact: Optional[Dict[str, Any]]) -> Optional[str]:
    """Read a normalized language code out of a memory-service profile fact, if present."""
    if not isinstance(fact, dict):
        return None
    payload = fact.get("fact_value_json")
    if not isinstance(payload, dict):
        return None
    # "preferred_language" is the explicit preference; "language_code" is the Telegram hint.
    return (
        _normalize_lang_code(payload.get("preferred_language"))
        or _normalize_lang_code(payload.get("language_code"))
    )
async def resolve_preferred_language_persistent(
    chat_id: str,
    user_id: str,
    text: str,
    telegram_lang_code: Optional[str],
    team_id: Optional[str] = None,
) -> str:
    """Resolve language with memory-service fallback for post-restart continuity."""
    _cleanup_user_language_prefs()
    cache_key = f"{chat_id}:{user_id}"

    # Fast path: any in-process signal wins (text heuristic > Telegram code > cache).
    local_guess = (
        _detect_language_from_text(text)
        or _normalize_lang_code(telegram_lang_code)
        or USER_LANGUAGE_PREFS.get(cache_key, {}).get("lang")
    )
    if local_guess:
        USER_LANGUAGE_PREFS[cache_key] = {"lang": local_guess, "ts": time.time()}
        return local_guess

    # Slow path: consult the persisted profile fact, which survives process restarts.
    try:
        fact = await memory_client.get_fact(
            user_id=f"tg:{user_id}",
            fact_key="profile",
            team_id=team_id,
        )
        fact_lang = _extract_preferred_language_from_profile_fact(fact)
        if fact_lang:
            USER_LANGUAGE_PREFS[cache_key] = {"lang": fact_lang, "ts": time.time()}
            return fact_lang
    except Exception as e:
        # Best-effort lookup; fall through to the default on any failure.
        logger.debug(f"preferred language fact lookup failed: {e}")

    # Final fallback: Ukrainian.
    USER_LANGUAGE_PREFS[cache_key] = {"lang": "uk", "ts": time.time()}
    return "uk"
def _pending_state_cleanup():
now = time.time()
expired = [cid for cid, rec in LAST_PENDING_STATE.items() if now - rec.get('ts', 0) > PENDING_STATE_TTL]
@@ -483,9 +649,36 @@ async def agromatrix_telegram_webhook(update: TelegramUpdate):
if user_id and user_id in op_ids:
is_ops = True
# Operator NL or slash commands -> handle via Stepan handler
if is_slash or is_ops:
# Operator NL or operator slash commands -> handle via Stepan handler.
# Important: do NOT treat generic slash commands (/start, /agromatrix) as operator commands,
# otherwise regular users will see "Недостатньо прав" or Stepan errors.
operator_slash_cmds = {
"whoami",
"pending",
"pending_show",
"approve",
"reject",
"apply_dict",
"pending_stats",
}
slash_cmd = ""
if is_slash:
try:
slash_cmd = (msg_text.strip().split()[0].lstrip("/").strip().lower())
except Exception:
slash_cmd = ""
is_operator_slash = bool(slash_cmd) and slash_cmd in operator_slash_cmds
# Stepan handler currently depends on ChatOpenAI (OPENAI_API_KEY). If key is not configured,
# never route production traffic there (avoid "Помилка обробки..." and webhook 5xx).
stepan_enabled = bool(os.getenv("OPENAI_API_KEY", "").strip())
if stepan_enabled and (is_ops or is_operator_slash):
return await handle_stepan_message(update, AGROMATRIX_CONFIG)
if (is_ops or is_operator_slash) and not stepan_enabled:
logger.warning(
"Stepan handler disabled (OPENAI_API_KEY missing); falling back to Router pipeline "
f"for chat_id={chat_id}, user_id={user_id}, slash_cmd={slash_cmd!r}"
)
# General conversation -> standard Router pipeline (like all other agents)
return await handle_telegram_webhook(AGROMATRIX_CONFIG, update)
@@ -611,14 +804,37 @@ def extract_bot_mentions(text: str) -> List[str]:
return mentions
def should_force_detailed_reply(text: str) -> bool:
    """Soft signal: user explicitly asks for details/long format."""
    if not text:
        return False
    lowered = text.strip().lower()
    for marker in (
        "детально", "подробно", "розгорну", "розпиши", "по всіх пунктах",
        "step by step", "покроково", "з прикладами", "глибоко", "deep dive",
        "full", "повний розбір", "максимально детально",
    ):
        if marker in lowered:
            return True
    return False
def should_force_concise_reply(text: str) -> bool:
    """Soft concise mode by default, unless user asks for detailed answer.

    Returns True when the agent should keep its first reply short. Empty or
    whitespace-only input also defaults to concise.

    NOTE(review): this span contained stale pre-change lines from an earlier
    revision interleaved with the new body (invalid Python); reconstructed the
    post-change implementation.
    """
    if not text:
        return True
    stripped = text.strip()
    if not stripped:
        return True
    # An explicit request for details wins over the concise default.
    if should_force_detailed_reply(stripped):
        return False
    # Very long user request usually means they expect context-aware answer.
    if len(stripped) > 700:
        return False
    # For regular Q&A in chat keep first response concise by default.
    return True
COMPLEX_REASONING_KEYWORDS = [
@@ -808,7 +1024,9 @@ async def process_photo(
user_id: str,
username: str,
dao_id: str,
photo: Dict[str, Any]
photo: Dict[str, Any],
caption_override: Optional[str] = None,
bypass_media_gate: bool = False,
) -> Dict[str, Any]:
"""
Універсальна функція для обробки фото для будь-якого агента.
@@ -833,9 +1051,10 @@ async def process_photo(
return {"ok": False, "error": "No file_id in photo"}
logger.info(f"{agent_config.name}: Photo from {username} (tg:{user_id}), file_id: {file_id}")
_set_recent_photo_context(agent_config.agent_id, chat_id, user_id, file_id)
# Get caption for media question check
caption = (update.message or {}).get("caption") or ""
caption = caption_override if caption_override is not None else ((update.message or {}).get("caption") or "")
chat = (update.message or {}).get("chat", {})
chat_type = chat.get("type", "private")
is_private_chat = chat_type == "private"
@@ -843,7 +1062,7 @@ async def process_photo(
# BEHAVIOR POLICY v1: Media-no-comment
# Check if photo has a question/request in caption
if not is_private_chat and not is_training:
if not bypass_media_gate and not is_private_chat and not is_training:
has_question = detect_media_question(caption)
if not has_question:
logger.info(f"🔇 MEDIA-NO-COMMENT: Photo without question. Agent {agent_config.agent_id} NOT responding.")
@@ -961,10 +1180,10 @@ async def process_photo(
else:
await send_telegram_message(
chat_id,
"Не вдалося отримати опис зображення.",
"Не вдалося коректно обробити фото. Спробуйте інше фото або додайте короткий опис, що саме перевірити.",
telegram_token
)
return {"ok": False, "error": "No description in response"}
return {"ok": True, "handled": True, "reason": "vision_empty_response"}
else:
error_msg = response.get("error", "Unknown error") if isinstance(response, dict) else "Router error"
logger.error(f"{agent_config.name}: Vision-8b error: {error_msg}")
@@ -1338,6 +1557,13 @@ async def handle_telegram_webhook(
# Get DAO ID for this chat
dao_id = get_dao_id(chat_id, "telegram", agent_id=agent_config.agent_id)
initial_preferred_lang = resolve_preferred_language(
chat_id=chat_id,
user_id=user_id,
text=update.message.get("text", ""),
telegram_lang_code=from_user.get("language_code"),
)
# Оновлюємо факти про користувача/агента для побудови графу пам'яті
asyncio.create_task(
memory_client.upsert_fact(
@@ -1348,6 +1574,7 @@ async def handle_telegram_webhook(
"first_name": first_name,
"last_name": last_name,
"language_code": from_user.get("language_code"),
"preferred_language": initial_preferred_lang,
"is_bot": is_sender_bot,
},
team_id=dao_id,
@@ -1919,8 +2146,7 @@ async def handle_telegram_webhook(
result = await process_photo(
agent_config, update, chat_id, user_id, username, dao_id, photo
)
if result.get("ok"):
return result
return result
# Check if it's a voice message
voice = update.message.get("voice")
@@ -1947,6 +2173,26 @@ async def handle_telegram_webhook(
if not text:
text = update.message.get("text", "")
caption = update.message.get("caption", "")
# If user asks about a recently sent photo, run vision on cached photo file_id.
if text and _looks_like_photo_followup(text):
recent_file_id = _get_recent_photo_file_id(agent_config.agent_id, chat_id, user_id)
if recent_file_id:
logger.info(
f"{agent_config.name}: Detected follow-up photo question; using cached file_id={recent_file_id}"
)
followup_result = await process_photo(
agent_config=agent_config,
update=update,
chat_id=chat_id,
user_id=user_id,
username=username,
dao_id=dao_id,
photo={"file_id": recent_file_id},
caption_override=text,
bypass_media_gate=True,
)
return followup_result
if not text and not caption:
# Check for unsupported message types and silently ignore
@@ -2149,9 +2395,10 @@ async def handle_telegram_webhook(
return {"ok": True, "ack": True, "reason": respond_reason}
# FULL: proceed with LLM/Router call
# For prober requests, respond but don't send to Telegram
# For prober requests, skip LLM/Router entirely to save tokens
if is_prober:
logger.info(f"\U0001f9ea PROBER: Agent {agent_config.agent_id} responding to prober request. Reason: {respond_reason}")
logger.info(f"\U0001f9ea PROBER: Agent {agent_config.agent_id} responding to prober (no LLM call). Reason: {respond_reason}")
return {"ok": True, "agent": agent_config.agent_id, "prober": True, "response_preview": "[prober-skip-llm]"}
else:
logger.info(f"\u2705 SOWA: Agent {agent_config.agent_id} WILL respond (FULL). Reason: {respond_reason}")
@@ -2183,6 +2430,15 @@ async def handle_telegram_webhook(
else:
message_with_context = f"{training_prefix}{text}"
preferred_lang = await resolve_preferred_language_persistent(
chat_id=chat_id,
user_id=user_id,
text=text or "",
telegram_lang_code=from_user.get("language_code"),
team_id=dao_id,
)
preferred_lang_label = preferred_language_label(preferred_lang)
# Build request to Router
system_prompt = agent_config.system_prompt
logger.info(f"📝 {agent_config.name} system_prompt length: {len(system_prompt) if system_prompt else 0} chars")
@@ -2206,6 +2462,9 @@ async def handle_telegram_webhook(
"mentioned_bots": mentioned_bots,
"requires_complex_reasoning": needs_complex_reasoning,
"is_reply_to_agent": is_reply_to_agent,
"is_training_group": is_training_group,
"preferred_response_language": preferred_lang,
"preferred_response_language_label": preferred_lang_label,
},
"context": {
"agent_name": agent_config.name,
@@ -2218,17 +2477,30 @@ async def handle_telegram_webhook(
},
}
if should_force_detailed_reply(text):
router_request["metadata"]["force_detailed"] = True
if should_force_concise_reply(text):
# IMPORTANT: preserve conversation context! Only append concise instruction
router_request["metadata"]["force_concise"] = True
router_request["message"] = (
router_request["message"]
+ "\n\n(Інструкція: дай максимально коротку відповідь, якщо не просили деталей "
"і дочекайся додаткового питання.)"
+ "\n\n(Інструкція: спочатку дай коротку відповідь по суті (1-3 абзаци), "
"а якщо користувач попросить — розгорни детально.)"
+ f"\n(Мова відповіді: {preferred_lang_label}.)"
+ "\n(Не потрібно щоразу представлятися по імені або писати шаблонне: 'чим можу допомогти'.)"
)
if needs_complex_reasoning:
router_request["metadata"]["provider"] = "cloud_deepseek"
router_request["metadata"]["reason"] = "auto_complex"
if not should_force_concise_reply(text):
router_request["message"] = (
router_request["message"]
+ f"\n\n(Мова відповіді: {preferred_lang_label}.)"
+ "\n(Не потрібно щоразу представлятися по імені або писати шаблонне: 'чим можу допомогти'.)"
)
# Send to Router
logger.info(f"Sending to Router: agent={agent_config.agent_id}, dao={dao_id}, user=tg:{user_id}")
@@ -2238,6 +2510,9 @@ async def handle_telegram_webhook(
if isinstance(response, dict) and response.get("ok"):
answer_text = response.get("data", {}).get("text") or response.get("response", "")
image_base64 = response.get("image_base64") or response.get("data", {}).get("image_base64")
file_base64 = response.get("file_base64") or response.get("data", {}).get("file_base64")
file_name = response.get("file_name") or response.get("data", {}).get("file_name") or "artifact.bin"
file_mime = response.get("file_mime") or response.get("data", {}).get("file_mime") or "application/octet-stream"
# Debug logging
logger.info(f"📦 Router response: {len(answer_text)} chars, model={response.get('model')}, backend={response.get('backend')}")
@@ -2246,7 +2521,9 @@ async def handle_telegram_webhook(
logger.info(f"🖼️ Received image_base64: {len(image_base64)} chars")
else:
logger.debug("⚠️ No image_base64 in response")
if file_base64:
logger.info(f"📄 Received file_base64: {len(file_base64)} chars ({file_name})")
# Check for NO_OUTPUT (LLM decided not to respond)
if is_no_output_response(answer_text):
logger.info(f"🔇 NO_OUTPUT: Agent {agent_config.agent_id} returned empty/NO_OUTPUT. Not sending to Telegram.")
@@ -2305,8 +2582,27 @@ async def handle_telegram_webhook(
logger.info(f"🧪 PROBER: Skipping Telegram send for prober request. Response: {answer_text[:100]}...")
return {"ok": True, "agent": agent_config.agent_id, "prober": True, "response_preview": answer_text[:100]}
# Send file artifact if generated
if file_base64:
try:
file_bytes = base64.b64decode(file_base64)
token = telegram_token or os.getenv("TELEGRAM_BOT_TOKEN")
url = f"https://api.telegram.org/bot{token}/sendDocument"
caption = answer_text[:1024] if answer_text else ""
safe_name = str(file_name).split("/")[-1].split("\\")[-1] or "artifact.bin"
async with httpx.AsyncClient() as client:
files = {"document": (safe_name, BytesIO(file_bytes), file_mime)}
data = {"chat_id": chat_id}
if caption:
data["caption"] = caption
response_doc = await client.post(url, files=files, data=data, timeout=45.0)
response_doc.raise_for_status()
logger.info(f"✅ Sent generated document to Telegram chat {chat_id}: {safe_name}")
except Exception as e:
logger.error(f"❌ Failed to send document to Telegram: {e}")
await send_telegram_message(chat_id, answer_text or "Файл згенеровано, але не вдалося надіслати документ.", telegram_token)
# Send image if generated
if image_base64:
elif image_base64:
try:
# Decode base64 image
image_bytes = base64.b64decode(image_base64)
@@ -2344,6 +2640,7 @@ async def handle_telegram_webhook(
agent_metadata={
"mentioned_bots": mentioned_bots,
"requires_complex_reasoning": needs_complex_reasoning,
"preferred_language": preferred_lang,
},
username=username,
)

View File

@@ -20,6 +20,46 @@ except ImportError:
# Router configuration from environment
ROUTER_BASE_URL = os.getenv("ROUTER_URL", "http://127.0.0.1:9102")
ROUTER_TIMEOUT = float(os.getenv("ROUTER_TIMEOUT", "180.0"))
# Token budgets per reply mode; each overridable via its environment variable.
GATEWAY_MAX_TOKENS_DEFAULT = int(os.getenv("GATEWAY_MAX_TOKENS_DEFAULT", "700"))
GATEWAY_MAX_TOKENS_CONCISE = int(os.getenv("GATEWAY_MAX_TOKENS_CONCISE", "220"))
GATEWAY_MAX_TOKENS_TRAINING = int(os.getenv("GATEWAY_MAX_TOKENS_TRAINING", "900"))
GATEWAY_TEMPERATURE_DEFAULT = float(os.getenv("GATEWAY_TEMPERATURE_DEFAULT", "0.4"))
# Lower default cap for the 'senpai' agent; 'detailed' mode raises the cap.
GATEWAY_MAX_TOKENS_SENPAI_DEFAULT = int(os.getenv("GATEWAY_MAX_TOKENS_SENPAI_DEFAULT", "320"))
GATEWAY_MAX_TOKENS_DETAILED = int(os.getenv("GATEWAY_MAX_TOKENS_DETAILED", "900"))
def _apply_runtime_communication_guardrails(system_prompt: str, metadata: Dict[str, Any]) -> str:
"""Apply global communication constraints for all agents in Telegram flows."""
if not system_prompt:
return system_prompt
lang_label = (metadata or {}).get("preferred_response_language_label") or "user language"
guardrail = (
"\n\n[GLOBAL COMMUNICATION POLICY]\n"
"1) Do not introduce yourself by name in every message.\n"
"2) Do not add repetitive generic closers like 'how can I help' unless user explicitly asks.\n"
"3) Continue the dialog naturally from context.\n"
f"4) Respond in {lang_label}, matching the user's latest language.\n"
)
return system_prompt + guardrail
def _apply_agent_style_guardrails(agent_id: str, system_prompt: str) -> str:
"""Apply lightweight runtime style constraints for specific agents."""
if not system_prompt:
return system_prompt
if agent_id == "nutra":
nutra_guardrail = (
"\n\n[STYLE LOCK - NUTRA]\n"
"Always write in first-person singular and feminine form.\n"
"Use feminine wording in Ukrainian/Russian (e.g., 'я підготувала', 'я готова', "
"'я зрозуміла').\n"
"Never switch to masculine forms (e.g., 'понял', 'готов').\n"
)
return system_prompt + nutra_guardrail
return system_prompt
async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
@@ -32,6 +72,8 @@ async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
context = body.get("context", {})
system_prompt = body.get("system_prompt") or context.get("system_prompt")
system_prompt = _apply_agent_style_guardrails(agent_id, system_prompt)
system_prompt = _apply_runtime_communication_guardrails(system_prompt, metadata)
if system_prompt:
logger.info(f"Using system prompt ({len(system_prompt)} chars) for agent {agent_id}")
@@ -39,10 +81,28 @@ async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
infer_url = f"{ROUTER_BASE_URL}/v1/agents/{agent_id}/infer"
metadata["agent_id"] = agent_id
# Keep defaults moderate to avoid overly long replies while preserving flexibility.
max_tokens = GATEWAY_MAX_TOKENS_DEFAULT
# Senpai tends to over-verbose responses in Telegram; use lower default unless user asked details.
if agent_id == "senpai":
max_tokens = GATEWAY_MAX_TOKENS_SENPAI_DEFAULT
if metadata.get("is_training_group"):
max_tokens = GATEWAY_MAX_TOKENS_TRAINING
if metadata.get("force_detailed"):
max_tokens = max(max_tokens, GATEWAY_MAX_TOKENS_DETAILED)
if metadata.get("force_concise"):
max_tokens = min(max_tokens, GATEWAY_MAX_TOKENS_CONCISE)
infer_body = {
"prompt": message,
"system_prompt": system_prompt,
"metadata": metadata
"metadata": metadata,
"max_tokens": max_tokens,
"temperature": float(metadata.get("temperature_override", GATEWAY_TEMPERATURE_DEFAULT)),
}
images = context.get("images", [])
@@ -54,7 +114,10 @@ async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
infer_body["provider_override"] = metadata["provider"]
prov = metadata.get("provider", "default")
logger.info(f"Sending to Router ({infer_url}): agent={agent_id}, provider={prov}, has_images={bool(images)}, prompt_len={len(message)}")
logger.info(
f"Sending to Router ({infer_url}): agent={agent_id}, provider={prov}, "
f"has_images={bool(images)}, prompt_len={len(message)}, max_tokens={max_tokens}"
)
try:
async with httpx.AsyncClient(timeout=ROUTER_TIMEOUT) as client:
@@ -74,12 +137,18 @@ async def send_to_router(body: Dict[str, Any]) -> Dict[str, Any]:
"ok": True,
"data": {
"text": result.get("response", result.get("text", "")),
"image_base64": result.get("image_base64")
"image_base64": result.get("image_base64"),
"file_base64": result.get("file_base64"),
"file_name": result.get("file_name"),
"file_mime": result.get("file_mime"),
},
"response": result.get("response", result.get("text", "")),
"model": result.get("model"),
"backend": result.get("backend"),
"image_base64": result.get("image_base64")
"image_base64": result.get("image_base64"),
"file_base64": result.get("file_base64"),
"file_name": result.get("file_name"),
"file_mime": result.get("file_mime"),
}
except httpx.TimeoutException as e: