agromatrix: add pending-question memory, anti-repeat guard, and numeric contract

This commit is contained in:
NODA1 System
2026-02-21 12:47:23 +01:00
parent a87a1fe52c
commit d963c52fe5
2 changed files with 621 additions and 50 deletions

View File

@@ -11,6 +11,7 @@ import httpx
import logging
import hashlib
import time # For latency metrics
from difflib import SequenceMatcher
# CrewAI Integration
try:
@@ -262,12 +263,114 @@ def _build_agromatrix_deterministic_fallback(candidates: List[Dict[str, Any]]) -
# Agents whose empty/meta answers trigger a recovery reply instead of being
# returned to the user as-is.
EMPTY_ANSWER_GUARD_AGENTS = {"devtools", "monitor"}
# Agents that must follow the deterministic plant-identification policy
# (classifier tools first, LLM only explains the classifier result).
# Overridable via the DETERMINISTIC_PLANT_POLICY_AGENTS env var as a
# comma-separated list; entries are trimmed and lowercased, empties dropped.
DETERMINISTIC_PLANT_POLICY_AGENTS = {
    part.strip().lower()
    for part in os.getenv(
        "DETERMINISTIC_PLANT_POLICY_AGENTS",
        "agromatrix,greenfood,nutra",
    ).split(",")
    if part.strip()
}
# Minimum SequenceMatcher ratio between the previous and the new answer
# fingerprint for the anti-repeat guard to treat them as "the same answer".
REPEAT_FINGERPRINT_MIN_SIMILARITY = float(os.getenv("AGENT_REPEAT_FINGERPRINT_MIN_SIMILARITY", "0.92"))
def _normalize_text_response(text: str) -> str:
    """Collapse every whitespace run to a single space and trim the ends.

    ``None`` (or any falsy value) normalizes to an empty string.
    """
    collapsed = re.sub(r"\s+", " ", str(text or ""))
    return collapsed.strip()
def _response_fingerprint(text: str) -> str:
    """Build a short normalized fingerprint of an answer for repeat detection.

    Lowercases the text, keeps only Latin letters, Cyrillic letters (base
    а-я range plus Ukrainian і/ї/є/ґ), digits and a small punctuation set,
    collapses whitespace and truncates to 240 characters.

    BUG FIX: the character class previously listed only "аіїєґ" and was
    missing the "а-я" range, so nearly all Cyrillic letters were stripped
    and fingerprints of Ukrainian/Russian answers collapsed to almost
    nothing, defeating the anti-repeat similarity check.
    """
    # str(text or "") guards None; the later \s+ collapse/strip subsumes
    # the separate normalization pass the original delegated to a helper.
    normalized = str(text or "").lower()
    normalized = re.sub(r"[^a-zа-яіїєґ0-9%./:;,+\- ]+", " ", normalized)
    normalized = re.sub(r"\s+", " ", normalized).strip()
    return normalized[:240]
def _fingerprint_similarity(a: str, b: str) -> float:
    """Return a 0..1 similarity ratio between two fingerprints.

    An empty operand on either side yields 0.0 (never "similar").
    """
    if a and b:
        return SequenceMatcher(None, a, b).ratio()
    return 0.0
def _looks_like_user_question(text: str) -> bool:
    """Heuristic: does the text read as a question?

    True when the text contains a '?' or starts with a known interrogative
    word (Ukrainian / English / Russian) followed by a space.
    """
    lowered = (text or "").strip().lower()
    if not lowered:
        return False
    if "?" in lowered:
        return True
    starters = (
        "що", "як", "чому", "коли", "де", "скільки", "яка", "який", "які",
        "what", "how", "why", "when", "where", "which", "can you",
        "что", "как", "почему", "когда", "где", "сколько",
    )
    for starter in starters:
        if lowered.startswith(starter + " "):
            return True
    return False
def _looks_like_negative_feedback(text: str) -> bool:
    """Heuristic: the user says the previous answer was wrong/irrelevant/repeated.

    Substring match against Ukrainian / English / Russian complaint markers.
    FIX: "неправильно" was listed twice (the UA and RU rows share the same
    spelling); the duplicate is removed — substring matching is unaffected.
    """
    lowered = (text or "").lower()
    markers = (
        "не вірно", "невірно", "неправильно", "помилка", "знову не так",
        "це не так", "не релевантно", "повтор", "ти знову", "мимо",
        "wrong", "incorrect", "not relevant", "repeat", "again wrong",
        "неверно", "это ошибка", "снова не так",
    )
    return any(marker in lowered for marker in markers)
def _looks_like_numeric_request(text: str) -> bool:
    """Heuristic: the user is asking for a number (cost, total, price, margin...).

    Substring match against Ukrainian / English / Russian money/quantity markers.
    """
    lowered = (text or "").lower()
    markers = (
        "скільки", "сума", "витра", "cost", "total", "amount", "ціна",
        "вартість", "дохід", "прибут", "маржа", "баланс", "unit cost",
        "сколько", "сумма", "затрат", "стоимость", "расход",
    )
    for marker in markers:
        if marker in lowered:
            return True
    return False
def _numeric_contract_present(text: str) -> bool:
    """Return True when a response satisfies the agro numeric contract.

    A numeric answer is contract-compliant only when it contains BOTH:
      * a value followed by a recognized unit (грн/uah/usd/kg/кг/га/%/...), and
      * an explicit source marker (sheet/row/cell/лист/рядок/комірка/джерело).
    Responses without any digit trivially fail.

    BUG FIX: the cell-marker pattern was r"\омірк..." — a typo in which the
    backslash swallowed the word boundary and the leading "к" of "комірка"
    (Ukrainian for "cell"). Restored to r"\bкомірк...".
    """
    # Inline of _normalize_text_response + lower(): collapse whitespace, trim.
    low = re.sub(r"\s+", " ", str(text or "")).strip().lower()
    if not re.search(r"\d", low):
        return False
    has_value_with_unit = re.search(
        r"\b\d[\d\s.,]*\s*(грн|uah|usd|eur|kg|кг|т|л|га|шт|%|тон|літр|hectare|ha)\b",
        low,
    ) is not None
    has_explicit_source = any(
        re.search(pattern, low) is not None
        for pattern in (
            r"\bsheet\s*[:#]?\s*[a-z0-9_]+",
            r"\brow\s*[:#]?\s*\d+",
            r"\bрядок\s*[:#]?\s*\d+",
            r"\bлист\s*[:#]?\s*[a-zа-я0-9_]+",
            r"\bcell\s*[:#]?\s*[a-z]+\d+",
            r"\bкомірк[а-я]*\s*[:#]?\s*[a-zа-я]+\d+",
            r"\bsource\s*[:#]",
            r"\bджерел[оа]\s*[:#]",
        )
    )
    return bool(has_value_with_unit and has_explicit_source)
def _build_numeric_contract_uncertain_response() -> str:
    """Canned Ukrainian reply for numeric questions that cannot be sourced.

    Asks the user for the spreadsheet/file (or sheet and range) and promises
    a strict "value + unit + source(sheet,row)" answer format.
    """
    sentences = (
        "Не можу підтвердити точне число без джерела. ",
        "Щоб дати коректну відповідь, надішли таблицю/файл або уточни лист і діапазон. ",
        "Формат відповіді дам строго як: value + unit + source(sheet,row).",
    )
    return "".join(sentences)
def _response_is_uncertain_or_incomplete(text: str) -> bool:
    """Return True when a response is empty or reads as uncertain.

    Used to decide whether an answer may resolve a pending question:
    empty text, or text containing "I'm not sure / send more / clarify"
    style markers (Ukrainian / English / Russian), counts as uncertain.

    FIX: "уточни" was listed twice (the UA and RU rows share the same
    spelling); the duplicate is removed — substring matching is unaffected.
    """
    # Inline of _normalize_text_response + lower(): collapse whitespace, trim.
    low = re.sub(r"\s+", " ", str(text or "")).strip().lower()
    if not low:
        return True
    markers = (
        "не впевнений", "не можу", "надішли", "уточни", "уточніть",
        "потрібно більше", "insufficient", "need more", "please send",
        "не уверен", "не могу", "нужно больше",
    )
    return any(marker in low for marker in markers)
def _needs_empty_answer_recovery(text: str) -> bool:
normalized = _normalize_text_response(text)
if not normalized:
@@ -1369,6 +1472,8 @@ async def agent_infer(agent_id: str, request: InferRequest):
# MEMORY RETRIEVAL (v4.0 - Universal for all agents)
# =========================================================================
memory_brief_text = ""
brief: Optional[MemoryBrief] = None
session_state = None
# Extract metadata once for both retrieval and storage
metadata = request.metadata or {}
channel = "telegram" # Default
@@ -1382,7 +1487,32 @@ async def agent_infer(agent_id: str, request: InferRequest):
# IMPORTANT: inspect only the latest user text when provided by gateway,
# not the full context-augmented prompt.
raw_user_text = str(metadata.get("raw_user_text", "") or "").strip()
image_guard_text = raw_user_text if raw_user_text else request.prompt
incoming_user_text = raw_user_text if raw_user_text else request.prompt
image_guard_text = incoming_user_text
track_pending_question = _looks_like_user_question(incoming_user_text)
if (
MEMORY_RETRIEVAL_AVAILABLE
and memory_retrieval
and chat_id
and user_id
and track_pending_question
):
try:
await memory_retrieval.register_pending_question(
channel=channel,
chat_id=chat_id,
user_id=user_id,
agent_id=request_agent_id,
question_text=incoming_user_text,
metadata={
"source": "router_infer",
"has_images": bool(request.images),
},
)
except Exception as e:
logger.debug(f"Pending question register skipped: {e}")
if (not request.images) and _looks_like_image_question(image_guard_text):
return InferResponse(
response=(
@@ -1405,6 +1535,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
username=username,
message=request.prompt
)
session_state = brief.session_state if brief else None
memory_brief_text = brief.to_text(max_lines=10)
if memory_brief_text:
logger.info(f"🧠 Memory brief for {request_agent_id}: {len(memory_brief_text)} chars")
@@ -1454,6 +1585,63 @@ async def agent_infer(agent_id: str, request: InferRequest):
f"🧩 Prompt meta for {agent_id}: source={system_prompt_source}, "
f"version={effective_metadata['system_prompt_version']}, hash={system_prompt_hash}"
)
async def _finalize_response_text(text: str, backend_tag: str) -> str:
    """Post-process an outgoing answer before it is returned and stored.

    Closure over the enclosing request handler: reads request_agent_id,
    incoming_user_text, session_state, track_pending_question, chat_id,
    user_id, username, channel and the memory_retrieval service.

    Applied in order:
      1. Agro numeric contract — numeric answers without unit + source are
         replaced by an explicit "cannot confirm without a source" reply.
      2. Anti-repeat guard — after negative user feedback, a near-identical
         answer is replaced with a non-repetitive recovery text.
      3. Memory bookkeeping — resolve the oldest pending question (only for
         confident answers) and persist the new answer fingerprint.

    Args:
        text: raw answer text from whichever backend produced it.
        backend_tag: short backend label, used only for logging.

    Returns:
        The normalized (possibly replaced) answer text; empty input is
        returned unchanged so upstream empty-answer handling can run.
    """
    final_text = _normalize_text_response(text)
    if not final_text:
        # Nothing to post-process; empty answers are handled elsewhere.
        return final_text
    # Agro numeric contract: no numbers without unit + source marker.
    if request_agent_id == "agromatrix" and _looks_like_numeric_request(incoming_user_text):
        if not _numeric_contract_present(final_text):
            final_text = _build_numeric_contract_uncertain_response()
    # Anti-repeat guard: if user reports wrong answer and new answer is near-identical
    # to previous one, force non-repetitive recovery text.
    prev_fp = ""
    if session_state and getattr(session_state, "last_answer_fingerprint", None):
        prev_fp = str(session_state.last_answer_fingerprint or "")
    new_fp = _response_fingerprint(final_text)
    if prev_fp and new_fp:
        similarity = _fingerprint_similarity(prev_fp, new_fp)
        if similarity >= REPEAT_FINGERPRINT_MIN_SIMILARITY and _looks_like_negative_feedback(incoming_user_text):
            final_text = (
                "Прийняв, попередня відповідь була не по суті. Не повторюю її. "
                "Переформулюю коротко і по ділу: надішли 1 конкретне питання або файл/фото, "
                "і я дам перевірену відповідь із джерелом."
            )
            # Re-fingerprint the replacement so the stored session state
            # reflects what was actually sent to the user.
            new_fp = _response_fingerprint(final_text)
            logger.warning(
                f"🔁 Repeat guard fired for {request_agent_id}: similarity={similarity:.3f}, backend={backend_tag}"
            )
    # Resolve oldest pending question only when answer is not uncertain.
    if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id:
        try:
            if track_pending_question and not _response_is_uncertain_or_incomplete(final_text):
                await memory_retrieval.resolve_pending_question(
                    channel=channel,
                    chat_id=chat_id,
                    user_id=user_id,
                    agent_id=request_agent_id,
                    answer_text=final_text,
                    reason="answered",
                )
        except Exception as e:
            # Best-effort: memory bookkeeping must never break the reply path.
            logger.debug(f"Pending question resolve skipped: {e}")
        try:
            if session_state and getattr(session_state, "conversation_id", None):
                # Persist the fingerprint (capped at 240 chars, matching
                # _response_fingerprint's own truncation) for the next turn.
                await memory_retrieval.update_session_state(
                    session_state.conversation_id,
                    last_answer_fingerprint=new_fp[:240],
                    last_user_id=user_id,
                    last_user_nick=username,
                )
        except Exception as e:
            logger.debug(f"Session fingerprint update skipped: {e}")
    return final_text
# Determine which backend to use
# Use router config to get default model for agent, fallback to qwen3:8b
@@ -1601,6 +1789,8 @@ async def agent_infer(agent_id: str, request: InferRequest):
parts = re.split(r"(?<=[.!?])\s+", final_response_text.strip())
if len(parts) > 3:
final_response_text = " ".join(parts[:3]).strip()
final_response_text = await _finalize_response_text(final_response_text, "crewai")
# Store interaction in memory
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id:
@@ -1656,7 +1846,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
# 1) run plant classifiers first (nature-id / plantnet)
# 2) apply confidence threshold
# 3) LLM only explains classifier result, no new guessing
if request_agent_id == "agromatrix" and plant_intent and TOOL_MANAGER_AVAILABLE and tool_manager:
if request_agent_id in DETERMINISTIC_PLANT_POLICY_AGENTS and plant_intent and TOOL_MANAGER_AVAILABLE and tool_manager:
try:
image_inputs = _extract_image_inputs_for_plant_tools(request.images, metadata)
if image_inputs:
@@ -1697,6 +1887,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
top_conf = float(candidates[0].get("confidence", 0.0)) if candidates else 0.0
if (not candidates) or (top_conf < threshold):
response_text = _build_agromatrix_not_sure_response(candidates, threshold)
response_text = await _finalize_response_text(response_text, "plant-id-deterministic-uncertain")
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id:
asyncio.create_task(
memory_retrieval.store_message(
@@ -1770,6 +1961,8 @@ async def agent_infer(agent_id: str, request: InferRequest):
if (top_name and top_name not in low) and (top_sci and top_sci not in low):
response_text = _build_agromatrix_deterministic_fallback(candidates)
response_text = await _finalize_response_text(response_text, llm_backend)
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id:
asyncio.create_task(
memory_retrieval.store_message(
@@ -1916,7 +2109,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
# Plant identification safety gate:
# avoid hard species claims when confidence is low or evidence is weak.
if request_agent_id == "agromatrix" and plant_intent and (uncertain or len(vision_sources) < 2):
if request_agent_id in DETERMINISTIC_PLANT_POLICY_AGENTS and plant_intent and (uncertain or len(vision_sources) < 2):
full_response = _build_cautious_plant_response(full_response or raw_response, len(vision_sources))
# Image quality gate: one soft retry if response looks empty/meta.
@@ -1948,8 +2141,10 @@ async def agent_infer(agent_id: str, request: InferRequest):
if _image_response_needs_retry(full_response):
full_response = _build_image_fallback_response(request_agent_id, request.prompt)
elif request_agent_id == "agromatrix" and _vision_response_is_blurry(full_response):
elif request_agent_id in DETERMINISTIC_PLANT_POLICY_AGENTS and _vision_response_is_blurry(full_response):
full_response = _build_image_fallback_response(request_agent_id, request.prompt)
full_response = await _finalize_response_text(full_response, "swapper-vision")
# Store vision message in agent-specific memory
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id and full_response:
@@ -1979,8 +2174,12 @@ async def agent_infer(agent_id: str, request: InferRequest):
)
else:
logger.error(f"❌ Swapper vision error: {vision_resp.status_code} - {vision_resp.text[:200]}")
fallback_response = await _finalize_response_text(
_build_image_fallback_response(request_agent_id, request.prompt),
"swapper-vision-fallback",
)
return InferResponse(
response=_build_image_fallback_response(request_agent_id, request.prompt),
response=fallback_response,
model="qwen3-vl-8b",
tokens_used=None,
backend="swapper-vision-fallback"
@@ -1988,8 +2187,12 @@ async def agent_infer(agent_id: str, request: InferRequest):
except Exception as e:
logger.error(f"❌ Vision processing failed: {e}", exc_info=True)
fallback_response = await _finalize_response_text(
_build_image_fallback_response(request_agent_id, request.prompt),
"swapper-vision-fallback",
)
return InferResponse(
response=_build_image_fallback_response(request_agent_id, request.prompt),
response=fallback_response,
model="qwen3-vl-8b",
tokens_used=None,
backend="swapper-vision-fallback"
@@ -2435,6 +2638,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
logger.debug(f" Tool {tr['name']}: no image_base64")
logger.info(f"{cloud['name'].upper()} response received, {tokens_used} tokens")
response_text = await _finalize_response_text(response_text, f"{cloud['name']}-cloud")
# Store message in agent-specific memory (async, non-blocking)
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id:
@@ -2563,6 +2767,7 @@ async def agent_infer(agent_id: str, request: InferRequest):
"Я не отримав корисну відповідь з першої спроби. "
"Сформулюй запит коротко ще раз, і я відповім конкретно."
)
local_response = await _finalize_response_text(local_response, "swapper+ollama")
# Store in agent-specific memory
if MEMORY_RETRIEVAL_AVAILABLE and memory_retrieval and chat_id and user_id and local_response:
@@ -2607,8 +2812,9 @@ async def agent_infer(agent_id: str, request: InferRequest):
if generate_resp.status_code == 200:
data = generate_resp.json()
fallback_text = await _finalize_response_text(data.get("response", ""), "ollama-direct")
return InferResponse(
response=data.get("response", ""),
response=fallback_text,
model=model,
tokens_used=data.get("eval_count", 0),
backend="ollama-direct"