fix: quarantine dead brand commands + implement Memory LLM summary

Brand commands (~290 lines):
- Code was trapped inside `if reply_to_message:` block (unreachable)
- Moved to feature flag: ENABLE_BRAND_COMMANDS=true to activate
- Zero re-indentation: the 8-space-indented code fits unchanged as the feature-flag body
- Helper functions (_brand_*, _artifact_*) unchanged

Memory LLM Summary:
- Replace placeholder with real DeepSeek API integration
- Structured output: summary, goals, decisions, open_questions, next_steps, key_facts
- Graceful fallback if API key not set or call fails
- Added MEMORY_DEEPSEEK_API_KEY config
- Ukrainian output language

Deployed and verified on NODE1.

Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
Apple
2026-02-09 09:42:44 -08:00
parent 27e66b90bf
commit 3b924118be
3 changed files with 127 additions and 9 deletions

View File

@@ -1365,6 +1365,13 @@ async def handle_telegram_webhook(
f"(user {username} replied to bot msg {reply_to_message.get('message_id', '?')})"
)
# === BRAND COMMANDS (experimental, disabled by default) ===
# ~290 lines of brand/presentation command handlers.
# This code was NEVER active in production (was trapped inside wrong indent block).
# Quarantined behind feature flag. Enable with: ENABLE_BRAND_COMMANDS=true
# See: /бренд, /бренд_інтейк, /бренд_тема, /презентація, /job_статус
_brand_commands_enabled = os.environ.get("ENABLE_BRAND_COMMANDS", "").lower() in ("1", "true", "yes")
if _brand_commands_enabled:
text = update.message.get("text", "")
# Simple brand commands (Ukrainian)

View File

@@ -34,6 +34,12 @@ class Settings(BaseSettings):
cohere_model: str = "embed-multilingual-v3.0" # 1024 dimensions
embedding_dimensions: int = 1024
# DeepSeek LLM (for summary generation)
deepseek_api_key: str = "" # Must be set via MEMORY_DEEPSEEK_API_KEY env var
deepseek_api_url: str = "https://api.deepseek.com/v1/chat/completions"
deepseek_model: str = "deepseek-chat"
summary_max_tokens: int = 800
# Memory settings
short_term_window_messages: int = 20
short_term_window_minutes: int = 60

View File

@@ -7,10 +7,11 @@ DAARION Memory Service - FastAPI Application
- Long-term: memory items (персональна/проектна)
"""
from contextlib import asynccontextmanager
from typing import List, Optional
from typing import List, Optional, Dict, Any
from fastapi import Depends, BackgroundTasks
from uuid import UUID
import structlog
import httpx
from fastapi import FastAPI, HTTPException, Query
from fastapi.middleware.cors import CORSMiddleware
@@ -30,6 +31,110 @@ logger = structlog.get_logger()
settings = get_settings()
# System prompt sent verbatim to the LLM by _llm_generate_summary.
# Instructs the model to emit a strict-JSON, Ukrainian-language structured
# summary; the JSON keys here must match the fields parsed from the response.
SUMMARY_SYSTEM_PROMPT = """You are a conversation summarizer for DAARION AI agents.
Given a list of conversation events, produce a structured summary in Ukrainian.
Output format (JSON):
{
"summary": "Стислий опис розмови (2-5 речень)",
"goals": ["Ціль 1", "Ціль 2"],
"decisions": ["Рішення 1", "Рішення 2"],
"open_questions": ["Питання 1"],
"next_steps": ["Крок 1", "Крок 2"],
"key_facts": ["Факт 1", "Факт 2"]
}
Rules:
- Write in Ukrainian
- Be concise but preserve key information
- Focus on actionable items and decisions
- If no items for a category, use empty array []
- Return ONLY valid JSON, no markdown
"""
async def _llm_generate_summary(events: List[Dict[str, Any]]) -> Dict[str, Any]:
"""
Call DeepSeek API to generate structured thread summary.
Falls back to placeholder if API key is not configured or call fails.
"""
if not settings.deepseek_api_key:
logger.warning("deepseek_api_key_not_set", hint="Set MEMORY_DEEPSEEK_API_KEY to enable LLM summaries")
return {
"summary": f"Summary of {len(events)} events. [LLM not configured — set MEMORY_DEEPSEEK_API_KEY]",
"goals": [],
"decisions": [],
"open_questions": [],
"next_steps": [],
"key_facts": []
}
# Format events for LLM
events_text = []
for ev in events[-50:]: # Limit to last 50 events to stay within context
role = ev.get("role", "unknown")
content = ev.get("content", "")[:500] # Truncate long messages
events_text.append(f"[{role}]: {content}")
conversation = "\n".join(events_text)
try:
async with httpx.AsyncClient(timeout=30.0) as client:
resp = await client.post(
settings.deepseek_api_url,
headers={
"Authorization": f"Bearer {settings.deepseek_api_key}",
"Content-Type": "application/json",
},
json={
"model": settings.deepseek_model,
"messages": [
{"role": "system", "content": SUMMARY_SYSTEM_PROMPT},
{"role": "user", "content": f"Summarize this conversation:\n\n{conversation}"},
],
"max_tokens": settings.summary_max_tokens,
"temperature": 0.3,
},
)
resp.raise_for_status()
data = resp.json()
content = data["choices"][0]["message"]["content"]
# Parse JSON response
import json
try:
parsed = json.loads(content)
return {
"summary": parsed.get("summary", content),
"goals": parsed.get("goals", []),
"decisions": parsed.get("decisions", []),
"open_questions": parsed.get("open_questions", []),
"next_steps": parsed.get("next_steps", []),
"key_facts": parsed.get("key_facts", []),
}
except json.JSONDecodeError:
# LLM returned non-JSON — use raw text as summary
logger.warning("llm_summary_not_json", content_preview=content[:100])
return {
"summary": content,
"goals": [],
"decisions": [],
"open_questions": [],
"next_steps": [],
"key_facts": [],
}
except Exception as e:
logger.error("llm_summary_failed", error=str(e))
return {
"summary": f"Summary of {len(events)} events. [LLM call failed: {str(e)[:100]}]",
"goals": [],
"decisions": [],
"open_questions": [],
"next_steps": [],
"key_facts": [],
}
@asynccontextmanager
async def lifespan(app: FastAPI):
"""Startup and shutdown events"""
@@ -376,15 +481,15 @@ async def create_summary(thread_id: UUID, request: SummaryRequest):
# Get events to summarize
events = await db.get_events_for_summary(thread_id)
# TODO: Call LLM to generate summary
# For now, create a placeholder
summary_text = f"Summary of {len(events)} events. [Implement LLM summarization]"
# Generate structured summary via DeepSeek LLM
llm_result = await _llm_generate_summary(events)
summary_text = llm_result["summary"]
state = {
"goals": [],
"decisions": [],
"open_questions": [],
"next_steps": [],
"key_facts": []
"goals": llm_result["goals"],
"decisions": llm_result["decisions"],
"open_questions": llm_result["open_questions"],
"next_steps": llm_result["next_steps"],
"key_facts": llm_result["key_facts"]
}
# Create summary