snapshot: NODE1 production state 2026-02-09
Complete snapshot of /opt/microdao-daarion/ from NODE1 (144.76.224.179).
This represents the actual running production code that has diverged
significantly from the previous main branch.
Key changes from old main:
- Gateway (http_api.py): expanded from ~40KB to 164KB with full agent support
- Router: new /v1/agents/{id}/infer endpoint with vision + DeepSeek routing
- Behavior Policy: SOWA v2.2 (3-level: FULL/ACK/SILENT)
- Agent Registry: config/agent_registry.yml as single source of truth
- 13 agents configured (was 3)
- Memory service integration
- CrewAI teams and roles
Excluded from snapshot: venv/, .env, data/, backups, .tgz archives
Co-authored-by: Cursor <cursoragent@cursor.com>
This commit is contained in:
8
packages/agromatrix-tools/agromatrix_tools/__init__.py
Normal file
8
packages/agromatrix-tools/agromatrix_tools/__init__.py
Normal file
@@ -0,0 +1,8 @@
|
||||
# AgroMatrix tools package
|
||||
|
||||
from . import tool_dictionary
|
||||
from . import normalize
|
||||
|
||||
from . import tool_operation_plan
|
||||
|
||||
from . import tool_dictionary_review
|
||||
74
packages/agromatrix-tools/agromatrix_tools/audit.py
Normal file
74
packages/agromatrix-tools/agromatrix_tools/audit.py
Normal file
@@ -0,0 +1,74 @@
|
||||
import os
|
||||
import json
|
||||
import time
|
||||
import hashlib
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from nats.aio.client import Client as NATS
|
||||
|
||||
NATS_URL = os.getenv('NATS_URL', 'nats://localhost:4222')
|
||||
AUDIT_FILE = os.getenv('AGX_AUDIT_FILE', 'artifacts/audit.log.jsonl')
|
||||
MAX_BYTES = int(os.getenv('AGX_AUDIT_MAX_BYTES', '4096'))
|
||||
REDACT_KEYS = set(k.strip().lower() for k in os.getenv('AGX_AUDIT_REDACT_KEYS', 'token,secret,password,authorization,cookie,api_key,signature').split(','))
|
||||
TRACE_ID = os.getenv('AGX_TRACE_ID', '')
|
||||
|
||||
|
||||
def _sanitize(obj):
    """Recursively mask values whose keys look secret (per REDACT_KEYS)."""
    if isinstance(obj, dict):
        clean = {}
        for key, val in obj.items():
            clean[key] = "[REDACTED]" if key.lower() in REDACT_KEYS else _sanitize(val)
        return clean
    if isinstance(obj, list):
        return [_sanitize(item) for item in obj]
    return obj
|
||||
|
||||
|
||||
def _preview(obj):
    """Serialize *obj* to a sanitized JSON preview capped at MAX_BYTES bytes.

    Returns (preview_or_None, truncated_flag, size_in_bytes).  On any
    serialization failure the payload is dropped entirely: (None, True, 0).
    """
    try:
        raw = json.dumps(_sanitize(obj), ensure_ascii=False, sort_keys=True, default=str)
        encoded = raw.encode("utf-8")
        size = len(encoded)
        if size > MAX_BYTES:
            # BUG FIX: truncate on the byte budget, not on characters — a
            # character slice could still exceed MAX_BYTES for multi-byte
            # text even though `size` is measured in bytes.
            return encoded[:MAX_BYTES].decode("utf-8", errors="ignore"), True, size
        return raw, False, size
    except Exception:
        return None, True, 0
|
||||
|
||||
|
||||
def _hash(obj) -> str:
|
||||
try:
|
||||
raw = json.dumps(obj, ensure_ascii=False, sort_keys=True, default=str)
|
||||
except Exception:
|
||||
raw = str(obj)
|
||||
return hashlib.sha256(raw.encode()).hexdigest()[:16]
|
||||
|
||||
|
||||
async def _publish(subject: str, payload: dict):
    """Publish *payload* as JSON on the NATS subject, then drain the connection."""
    client = NATS()
    await client.connect(servers=[NATS_URL])
    data = json.dumps(payload, ensure_ascii=False).encode()
    await client.publish(subject, data)
    await client.flush(1)
    await client.drain()
|
||||
|
||||
|
||||
def audit_tool_call(tool: str, inputs: dict, outputs: dict, success: bool, duration_ms: int):
    """Append a structured audit record to AUDIT_FILE and mirror it to NATS.

    Previews are sanitized and size-capped; hashes cover the full payloads.
    The NATS publish is best-effort: a failure there never breaks the tool
    call being audited.
    """
    in_preview, in_truncated, in_bytes = _preview(inputs)
    out_preview, out_truncated, out_bytes = _preview(outputs)
    event = {
        'trace_id': TRACE_ID,
        'tool': tool,
        'inputs_hash': _hash(inputs),
        'outputs_hash': _hash(outputs),
        'inputs_preview': in_preview,
        'outputs_preview': out_preview,
        'payload_truncated': in_truncated or out_truncated,
        'payload_size': {'inputs': in_bytes, 'outputs': out_bytes},
        'success': success,
        'duration_ms': duration_ms,
        'ts': int(time.time() * 1000),
    }
    Path(AUDIT_FILE).parent.mkdir(parents=True, exist_ok=True)
    with open(AUDIT_FILE, 'a', encoding='utf-8') as fh:
        fh.write(json.dumps(event, ensure_ascii=False) + '\n')
    try:
        asyncio.run(_publish('agx.audit.toolcall', event))
    except Exception:
        # Best-effort mirror only; the file record above is authoritative.
        pass
|
||||
16
packages/agromatrix-tools/agromatrix_tools/common.py
Normal file
16
packages/agromatrix-tools/agromatrix_tools/common.py
Normal file
@@ -0,0 +1,16 @@
|
||||
import os
|
||||
import json
|
||||
import requests
|
||||
|
||||
|
||||
def _auth_headers():
|
||||
token = os.getenv("FARMOS_TOKEN")
|
||||
if token:
|
||||
return {"Authorization": f"Bearer {token}"}
|
||||
user = os.getenv("FARMOS_USER")
|
||||
pwd = os.getenv("FARMOS_PASSWORD")
|
||||
if user and pwd:
|
||||
import base64
|
||||
auth = base64.b64encode(f"{user}:{pwd}".encode()).decode()
|
||||
return {"Authorization": f"Basic {auth}"}
|
||||
return {}
|
||||
51
packages/agromatrix-tools/agromatrix_tools/normalize.py
Normal file
51
packages/agromatrix-tools/agromatrix_tools/normalize.py
Normal file
@@ -0,0 +1,51 @@
|
||||
import re
|
||||
from typing import Tuple
|
||||
|
||||
|
||||
def parse_quantity(text: str) -> Tuple[float, str]:
    """Extract a numeric amount and its unit from free text.

    Accepts both decimal comma and decimal point (e.g. "2,5 т", "2.5 t").
    Raises ValueError("quantity_not_found") when nothing matches.
    """
    match = re.search(r"([0-9]+(?:[\.,][0-9]+)?)\s*([a-zA-Zа-яА-Я/]+)", text)
    if match is None:
        raise ValueError("quantity_not_found")
    amount = float(match.group(1).replace(',', '.'))
    return amount, match.group(2).lower()
|
||||
|
||||
|
||||
def parse_rate(text: str) -> Tuple[float, str]:
    """Extract an application rate and its unit from free text (e.g. "150 кг/га").

    Decimal comma and point are both accepted.  Raises
    ValueError("rate_not_found") when no rate is present.
    """
    match = re.search(r"([0-9]+(?:[\.,][0-9]+)?)\s*([a-zA-Zа-яА-Я/]+)", text)
    if match is None:
        raise ValueError("rate_not_found")
    amount = float(match.group(1).replace(',', '.'))
    return amount, match.group(2).lower()
|
||||
|
||||
|
||||
def normalize_unit(unit: str, unit_map: list):
    """Resolve *unit* to a canonical unit id.

    *unit_map* is a list of unit dicts with 'id', optional 'name' and
    'synonyms' (the previous `dict` annotation was wrong — the value is
    iterated as a list).  Matching is case-insensitive against the id, the
    name, then each synonym; returns the canonical id or None.
    """
    needle = unit.casefold().strip()
    for entry in unit_map:
        if entry['id'] == needle:
            return entry['id']
        if needle == entry.get('name', '').casefold():
            return entry['id']
        for synonym in entry.get('synonyms', []):
            if needle == synonym.casefold():
                return entry['id']
    return None
|
||||
|
||||
|
||||
def convert(value: float, from_unit: str, to_unit: str, unit_map: list) -> float:
    """Convert *value* between units using their 'to_base' factors.

    *unit_map* is a list of unit dicts (the previous `dict` annotation was
    wrong).  Only direct base conversions are supported: either *from_unit*
    declares *to_unit* as its base (multiply by factor) or vice versa
    (divide).  Raises ValueError("unit_not_found") for unknown ids and
    ValueError("conversion_not_supported") otherwise.
    """
    if from_unit == to_unit:
        return value
    by_id = {entry['id']: entry for entry in unit_map}
    src = by_id.get(from_unit)
    dst = by_id.get(to_unit)
    if not src or not dst:
        raise ValueError("unit_not_found")
    if src.get('to_base') and src['to_base']['base'] == to_unit:
        return value * float(src['to_base']['factor'])
    if dst.get('to_base') and dst['to_base']['base'] == from_unit:
        return value / float(dst['to_base']['factor'])
    raise ValueError("conversion_not_supported")
|
||||
191
packages/agromatrix-tools/agromatrix_tools/tool_dictionary.py
Normal file
191
packages/agromatrix-tools/agromatrix_tools/tool_dictionary.py
Normal file
@@ -0,0 +1,191 @@
|
||||
import json
|
||||
import os
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
import yaml
|
||||
|
||||
try:
|
||||
from rapidfuzz import process, fuzz
|
||||
HAS_RAPIDFUZZ = True
|
||||
except Exception:
|
||||
HAS_RAPIDFUZZ = False
|
||||
|
||||
from .normalize import normalize_unit as _normalize_unit
|
||||
|
||||
DATA_PATH = Path(os.getenv('AGX_DICTIONARY_PATH', '/opt/microdao-daarion/data/dictionaries/dictionaries.yaml'))
|
||||
PENDING_PATH = Path(os.getenv('AGX_PENDING_PATH', '/opt/microdao-daarion/data/dictionaries/pending.jsonl'))
|
||||
REDACT_KEYS = set(k.strip().lower() for k in os.getenv('AGX_AUDIT_REDACT_KEYS', 'token,secret,password,authorization,cookie,api_key,signature').split(','))
|
||||
|
||||
|
||||
def _load():
    """Read and parse the dictionaries YAML file at DATA_PATH."""
    return yaml.safe_load(DATA_PATH.read_text(encoding='utf-8'))
|
||||
|
||||
|
||||
def _sanitize(obj):
    """Recursively replace values of secret-looking keys with a redaction marker."""
    if isinstance(obj, dict):
        return {
            key: '***REDACTED***' if key.lower() in REDACT_KEYS else _sanitize(val)
            for key, val in obj.items()
        }
    if isinstance(obj, list):
        return [_sanitize(val) for val in obj]
    return obj
|
||||
|
||||
|
||||
def _suggest(term: str, items: list):
    """Rank dictionary entries by fuzzy similarity to *term*.

    Every alias of an entry (its id, its name, and each synonym) competes as
    a separate candidate string; a hit on any alias suggests that entry.
    Returns at most five {"id", "name", "score"} dicts, best first, with
    scores normalized to the 0..1 range.
    """
    term_cf = term.casefold().strip()
    candidates = []
    for item in items:
        if item.get('id'):
            # (entry id, display name, candidate string to match against)
            candidates.append((item['id'], item['name'], item['id']))
            candidates.append((item['id'], item['name'], item['name']))
            for s in item.get('synonyms', []):
                candidates.append((item['id'], item['name'], s))

    scores = []
    if HAS_RAPIDFUZZ:
        choices = [c[2] for c in candidates]
        # rapidfuzz yields (match, score 0..100, index into choices);
        # scale the score down to 0..1 to match the difflib fallback.
        results = process.extract(term_cf, choices, scorer=fuzz.WRatio, limit=5)
        for match, score, idx in results:
            item_id, name, _ = candidates[idx]
            scores.append({"id": item_id, "name": name, "score": score / 100})
    else:
        # Pure-stdlib fallback used when rapidfuzz is not installed.
        import difflib
        for item_id, name, cand in candidates:
            score = difflib.SequenceMatcher(None, term_cf, cand.casefold()).ratio()
            scores.append({"id": item_id, "name": name, "score": score})
    scores = sorted(scores, key=lambda x: x['score'], reverse=True)[:5]

    return scores
|
||||
|
||||
|
||||
def _match(term: str, items: list):
    """Find the best dictionary entry for *term*.

    Returns (entry, confidence, matched_by).  Exact id/name/synonym hits
    are preferred (confidence 1.0 / 0.98 / 0.95); otherwise the top fuzzy
    suggestion wins if it scores >= 0.85.  (None, 0.0, 'none') when
    nothing qualifies.
    """
    needle = term.casefold().strip()
    for entry in items:
        if needle == entry.get('id', '').casefold():
            return entry, 1.0, 'exact_id'
        if needle == entry.get('name', '').casefold():
            return entry, 0.98, 'exact_name'
        if any(needle == syn.casefold() for syn in entry.get('synonyms', [])):
            return entry, 0.95, 'synonym'
    # No exact hit anywhere: fall back to fuzzy matching.
    suggestions = _suggest(term, items)
    if suggestions:
        best = suggestions[0]
        if best['score'] >= 0.85:
            entry = next((i for i in items if i.get('id') == best['id']), None)
            if entry:
                return entry, float(best['score']), 'fuzzy'
    return None, 0.0, 'none'
|
||||
|
||||
|
||||
def _pending(trace_id: str, source: str, category: str, raw_term: str, suggestions: list, note: str = ''):
    """Append an unresolved term to the pending-review JSONL queue."""
    PENDING_PATH.parent.mkdir(parents=True, exist_ok=True)
    entry = _sanitize({
        "ts": datetime.utcnow().isoformat() + 'Z',
        "trace_id": trace_id,
        "source": source,
        "category": category,
        "raw_term": raw_term,
        "suggestions": suggestions,
        "note": note,
    })
    with PENDING_PATH.open('a', encoding='utf-8') as fh:
        fh.write(json.dumps(entry, ensure_ascii=False) + '\n')
|
||||
|
||||
|
||||
def _normalize(term: str, items: list, category: str, trace_id: str, source: str):
    """Normalize *term* against *items*; queue it for review when uncertain.

    Returns a result dict with status 'ok' (confident match, id and name
    filled in) or 'pending' (no match with confidence >= 0.85; the term is
    appended to the pending queue for a human reviewer).
    """
    entry, confidence, matched_by = _match(term, items)
    suggestions = _suggest(term, items)
    matched = entry is not None and confidence >= 0.85
    if not matched:
        _pending(trace_id, source, category, term, suggestions)
    return {
        "status": "ok" if matched else "pending",
        "normalized_id": entry['id'] if matched else None,
        "canonical_name": entry.get('name') if matched else None,
        "confidence": confidence,
        "matched_by": matched_by,
        "suggestions": suggestions,
    }
|
||||
|
||||
|
||||
def normalize_field(term: str, trace_id: str = '', source: str = 'telegram'):
    """Normalize a field name against the 'fields' dictionary."""
    return _normalize(term, _load().get('fields', []), 'field', trace_id, source)
|
||||
|
||||
|
||||
def normalize_crop(term: str, trace_id: str = '', source: str = 'telegram'):
    """Normalize a crop name against the 'crops' dictionary."""
    return _normalize(term, _load().get('crops', []), 'crop', trace_id, source)
|
||||
|
||||
|
||||
def normalize_operation(term: str, trace_id: str = '', source: str = 'telegram'):
    """Normalize an operation name against the 'operations' dictionary."""
    return _normalize(term, _load().get('operations', []), 'operation', trace_id, source)
|
||||
|
||||
|
||||
def normalize_material(term: str, trace_id: str = '', source: str = 'telegram'):
    """Normalize a material name against the 'materials' dictionary."""
    return _normalize(term, _load().get('materials', []), 'material', trace_id, source)
|
||||
|
||||
|
||||
def normalize_unit(term: str, trace_id: str = '', source: str = 'telegram'):
    """Normalize a unit term against the 'units' dictionary.

    Exact/synonym matching is delegated to normalize.normalize_unit; when
    that fails the term is queued for review and a 'pending' result is
    returned.
    """
    data = _load()
    units = data.get('units', [])
    suggestions = _suggest(term, units)
    unit_id = _normalize_unit(term, units)
    if not unit_id:
        _pending(trace_id, source, 'unit', term, suggestions)
        return {
            "status": "pending",
            "normalized_id": None,
            "canonical_name": None,
            "confidence": 0.0,
            "matched_by": 'none',
            "suggestions": suggestions
        }
    item = next(u for u in units if u['id'] == unit_id)
    return {
        "status": "ok",
        "normalized_id": unit_id,
        # BUG FIX: previously reported the id as the canonical name; every
        # other normalizer returns the entry's human-readable 'name'.
        "canonical_name": item.get('name'),
        "confidence": 1.0,
        "matched_by": 'synonym',
        "suggestions": suggestions
    }
|
||||
|
||||
|
||||
def normalize_from_text(text: str, trace_id: str = '', source: str = 'telegram'):
    """Split *text* on commas and normalize each part against every dictionary.

    Each part is tried against the categories in order; the first confident
    ('ok') match is recorded and the remaining categories are skipped.  When
    no category matches, the 'pending' result whose top suggestion scores
    highest is recorded under that category.

    BUG FIX: the previous version appended the result and stopped at the
    first category regardless of status, so every term was recorded under
    'fields' and the other dictionaries were never consulted.
    """
    parts = [p.strip() for p in text.split(',') if p.strip()]
    results = {"fields": [], "crops": [], "operations": [], "materials": [], "units": []}
    normalizers = [
        ("fields", normalize_field),
        ("crops", normalize_crop),
        ("operations", normalize_operation),
        ("materials", normalize_material),
        ("units", normalize_unit),
    ]
    for part in parts:
        best = None  # (top suggestion score, category, result) among pendings
        matched = False
        for category, func in normalizers:
            res = func(part, trace_id=trace_id, source=source)
            if res['status'] == 'ok':
                results[category].append({"term": part, **res})
                matched = True
                break
            suggestions = res.get('suggestions') or []
            top_score = suggestions[0].get('score', 0.0) if suggestions else 0.0
            if best is None or top_score > best[0]:
                best = (top_score, category, res)
        if not matched and best is not None:
            results[best[1]].append({"term": part, **best[2]})
    return results
|
||||
@@ -0,0 +1,245 @@
|
||||
import json
|
||||
import os
|
||||
import hashlib
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
import yaml
|
||||
|
||||
PENDING_PATH = Path(os.getenv('AGX_PENDING_PATH', '/opt/microdao-daarion/data/dictionaries/pending.jsonl'))
|
||||
RESOLVED_PATH = Path(os.getenv('AGX_PENDING_RESOLVED_PATH', '/opt/microdao-daarion/data/dictionaries/pending_resolved.jsonl'))
|
||||
DICT_PATH = Path(os.getenv('AGX_DICTIONARY_PATH', '/opt/microdao-daarion/data/dictionaries/dictionaries.yaml'))
|
||||
LOCK_PATH = Path(os.getenv('AGX_DICTIONARY_LOCK', '/opt/microdao-daarion/data/dictionaries/locks/dictionaries.lock'))
|
||||
|
||||
REDACT_KEYS = set(k.strip().lower() for k in os.getenv('AGX_AUDIT_REDACT_KEYS', 'token,secret,password,authorization,cookie,api_key,signature').split(','))
|
||||
|
||||
|
||||
def _now():
|
||||
return datetime.utcnow().isoformat() + 'Z'
|
||||
|
||||
|
||||
def _sanitize(obj):
    """Recursively mask values stored under secret-looking keys."""
    if isinstance(obj, dict):
        cleaned = {}
        for key, val in obj.items():
            cleaned[key] = '***REDACTED***' if key.lower() in REDACT_KEYS else _sanitize(val)
        return cleaned
    if isinstance(obj, list):
        return [_sanitize(item) for item in obj]
    return obj
|
||||
|
||||
|
||||
def _load_pending():
    """Load the pending-review queue from PENDING_PATH.

    Each JSONL line is tagged with a stable content hash ('pending_id') and
    a human-friendly line reference ('pending_ref').  Returns [] when the
    queue file does not exist yet.

    BUG FIX: this previously read RESOLVED_PATH (copy-paste from
    _load_resolved), so the open queue was never actually loaded.
    """
    if not PENDING_PATH.exists():
        return []
    items = []
    for i, line in enumerate(PENDING_PATH.read_text(encoding='utf-8').split('\n'), start=1):
        if not line.strip():
            continue
        obj = json.loads(line)
        raw = line.strip()
        pending_id = 'sha256:' + hashlib.sha256(raw.encode()).hexdigest()
        items.append({
            'pending_ref': f"pending.jsonl:{i}",
            'pending_id': pending_id,
            **obj
        })
    return items
|
||||
|
||||
|
||||
def _load_resolved():
    """Load all resolution records from RESOLVED_PATH ([] when absent)."""
    if not RESOLVED_PATH.exists():
        return []
    lines = RESOLVED_PATH.read_text(encoding='utf-8').split('\n')
    return [json.loads(line) for line in lines if line.strip()]
|
||||
|
||||
|
||||
def get_pending_detail(pending_ref_or_id: str):
    """Return a review-friendly view of one pending item, or None.

    Accepts either the line reference ('pending.jsonl:N') or the content
    hash id.  When a matching resolution record exists, its decision and
    reason are merged into the returned dict; otherwise status is 'open'.
    """
    target = next(
        (p for p in _load_pending()
         if pending_ref_or_id in (p.get('pending_ref'), p.get('pending_id'))),
        None,
    )
    if not target:
        return None

    resolution = next(
        (r for r in _load_resolved()
         if r.get('pending_id') == target.get('pending_id')
         or r.get('pending_ref') == target.get('pending_ref')),
        None,
    )
    status, decision, reason = 'open', '', ''
    if resolution:
        decision = resolution.get('decision') or ''
        status = decision or 'resolved'
        reason = resolution.get('reason') or ''

    top_suggestions = [
        {'id': s.get('id'), 'score': s.get('score')}
        for s in (target.get('suggestions') or [])[:5]
    ]
    return {
        'ref': target.get('pending_ref'),
        'category': target.get('category'),
        'raw_term': target.get('raw_term'),
        'ts': target.get('ts'),
        'suggestions': top_suggestions,
        'status': status,
        'decision': decision,
        'reason': reason,
    }
|
||||
|
||||
|
||||
def list_pending(limit=50, category=None, status='open'):
    """List pending items; status='open' hides already-resolved ones."""
    items = _load_pending()
    resolved_ids = {r.get('pending_id') for r in _load_resolved()}
    if status == 'open':
        items = [p for p in items if p['pending_id'] not in resolved_ids]
    if category:
        items = [p for p in items if p.get('category') == category]
    return items[:limit]
|
||||
|
||||
|
||||
def _append_resolved(record: dict):
    """Append a sanitized resolution record to the resolved JSONL log."""
    RESOLVED_PATH.parent.mkdir(parents=True, exist_ok=True)
    line = json.dumps(_sanitize(record), ensure_ascii=False)
    with RESOLVED_PATH.open('a', encoding='utf-8') as fh:
        fh.write(line + "\n")
|
||||
|
||||
def approve_pending(pending_ref_or_id: str, action: dict, trace_id=None, reviewer='local'):
    """Mark a pending item as approved and log the resolution.

    *action* describes what to do with the term (e.g. map_to_existing or
    create_new_entry); apply_resolutions() later folds it into the
    dictionary file.  Raises ValueError('pending_not_found') for an
    unknown ref/id.
    """
    target = next(
        (p for p in _load_pending()
         if pending_ref_or_id in (p['pending_ref'], p['pending_id'])),
        None,
    )
    if target is None:
        raise ValueError('pending_not_found')

    record = {
        'ts': _now(),
        'pending_ref': target['pending_ref'],
        'pending_id': target['pending_id'],
        'decision': 'approved',
        'category': target.get('category'),
        'raw_term': target.get('raw_term'),
        'action': action,
        'reviewer': reviewer,
        'reason': action.get('reason', '') or '',
        'trace_id': trace_id or '',
    }
    _append_resolved(record)
    return record
|
||||
|
||||
|
||||
def reject_pending(pending_ref_or_id: str, reason: str, trace_id=None, reviewer='local'):
    """Mark a pending item as rejected and log the resolution.

    Raises ValueError('pending_not_found') when the ref/id is unknown.
    """
    target = next(
        (p for p in _load_pending()
         if pending_ref_or_id in (p['pending_ref'], p['pending_id'])),
        None,
    )
    if target is None:
        raise ValueError('pending_not_found')

    record = {
        'ts': _now(),
        'pending_ref': target['pending_ref'],
        'pending_id': target['pending_id'],
        'decision': 'rejected',
        'category': target.get('category'),
        'raw_term': target.get('raw_term'),
        'action': {'type': 'reject'},
        'reviewer': reviewer,
        'reason': reason,
        'trace_id': trace_id or '',
    }
    _append_resolved(record)
    return record
|
||||
|
||||
|
||||
def _load_dict():
    """Parse the canonical dictionaries YAML file."""
    text = DICT_PATH.read_text(encoding='utf-8')
    return yaml.safe_load(text)
|
||||
|
||||
|
||||
def _write_dict(data):
    """Write *data* back to the dictionaries YAML under a file lock.

    The lock file is created with O_CREAT|O_EXCL so acquisition is atomic;
    the previous exists()-then-write sequence was a TOCTOU race that could
    let two writers pass the check simultaneously.  Raises
    RuntimeError('lock_exists') when another writer holds the lock.
    """
    LOCK_PATH.parent.mkdir(parents=True, exist_ok=True)
    try:
        fd = os.open(str(LOCK_PATH), os.O_CREAT | os.O_EXCL | os.O_WRONLY)
    except FileExistsError:
        raise RuntimeError('lock_exists')
    try:
        os.write(fd, b'locked')
        os.close(fd)
        DICT_PATH.write_text(yaml.safe_dump(data, allow_unicode=True, sort_keys=False), encoding='utf-8')
    finally:
        try:
            LOCK_PATH.unlink()
        except Exception:
            # Leaving a stale lock is worse than a failed unlink message;
            # best-effort cleanup mirrors the original behavior.
            pass
|
||||
|
||||
|
||||
def _add_synonym(data, category, canonical_id, raw_term):
|
||||
items = data.get(category + 's', data.get(category, []))
|
||||
# category names in dict are plural
|
||||
if category in ['field','crop','operation','material','unit']:
|
||||
items = data.get(category + 's', [])
|
||||
for item in items:
|
||||
if item.get('id') == canonical_id:
|
||||
item.setdefault('synonyms', [])
|
||||
if raw_term not in item['synonyms']:
|
||||
item['synonyms'].append(raw_term)
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _create_entry(data, category, name, new_id, raw_term):
|
||||
key = category + 's'
|
||||
data.setdefault(key, [])
|
||||
entry = {'id': new_id, 'name': name, 'synonyms': [raw_term]}
|
||||
data[key].append(entry)
|
||||
return entry
|
||||
|
||||
|
||||
def apply_resolutions():
    """Fold every approved resolution into the dictionary file.

    map_to_existing / add_synonym attach the raw term as a synonym of an
    existing entry; create_new_entry appends a fresh entry.  The updated
    dictionary is written back once at the end.  Returns the number of
    resolutions applied.
    """
    data = _load_dict()
    applied = 0
    for record in _load_resolved():
        if record.get('decision') != 'approved':
            continue
        action = record.get('action', {})
        term = record.get('raw_term')
        category = record.get('category')
        kind = action.get('type')
        if kind in ('map_to_existing', 'add_synonym'):
            if _add_synonym(data, category, action.get('canonical_id'), term):
                applied += 1
        elif kind == 'create_new_entry':
            _create_entry(data, category, action.get('canonical_name'), action.get('canonical_id'), term)
            applied += 1
    _write_dict(data)
    return applied
|
||||
|
||||
|
||||
def stats():
    """Counts of open, approved and rejected review items."""
    resolved = _load_resolved()
    resolved_ids = {r.get('pending_id') for r in resolved}
    open_count = sum(1 for p in _load_pending() if p['pending_id'] not in resolved_ids)
    decisions = [r.get('decision') for r in resolved]
    return {
        'open': open_count,
        'approved': decisions.count('approved'),
        'rejected': decisions.count('rejected'),
    }
|
||||
|
||||
|
||||
def auto_approve(min_score=0.97, category=None, dry_run=True, reviewer='local'):
    """Approve open items whose top stored suggestion scores >= *min_score*.

    With dry_run=True (the default) nothing is written; the would-be
    actions are returned for inspection.  Items without suggestions are
    skipped.
    """
    results = []
    for item in list_pending(limit=1000, category=category, status='open'):
        suggestions = item.get('suggestions') or []
        if not suggestions:
            continue
        best = suggestions[0]
        if best.get('score', 0) < min_score:
            continue
        action = {
            'type': 'map_to_existing',
            'canonical_id': best.get('id'),
            'canonical_name': best.get('name'),
        }
        if not dry_run:
            approve_pending(item['pending_ref'], action, reviewer=reviewer)
        results.append({'pending_ref': item['pending_ref'], 'action': action, 'dry_run': dry_run})
    return results
|
||||
32
packages/agromatrix-tools/agromatrix_tools/tool_event_bus.py
Normal file
32
packages/agromatrix-tools/agromatrix_tools/tool_event_bus.py
Normal file
@@ -0,0 +1,32 @@
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import asyncio
|
||||
import json
|
||||
from nats.aio.client import Client as NATS
|
||||
|
||||
NATS_URL = os.getenv("NATS_URL", "nats://localhost:4222")
|
||||
|
||||
|
||||
async def publish(subject: str, payload: dict):
    """Publish *payload* as JSON on *subject* via NATS and audit the call."""
    started = time.time()
    client = NATS()
    await client.connect(servers=[NATS_URL])
    await client.publish(subject, json.dumps(payload).encode())
    await client.flush(1)
    await client.drain()
    elapsed_ms = int((time.time() - started) * 1000)
    audit_tool_call("tool_event_bus.publish", {"subject": subject}, {"ok": True}, True, elapsed_ms)
|
||||
|
||||
|
||||
async def subscribe(subject: str, handler, duration: float = 5.0):
    """Listen on *subject* for *duration* seconds, passing messages to *handler*.

    *handler* is called as handler(subject, decoded_message_body) for each
    message received during the window.

    BUG FIXES: the start timestamp was never captured here (NameError on
    `_t`, inherited from a copy of publish()), and the audit event was
    mislabelled as tool_event_bus.publish.
    """
    _t = time.time()
    nc = NATS()
    await nc.connect(servers=[NATS_URL])

    async def cb(msg):
        handler(subject, msg.data.decode())

    await nc.subscribe(subject, cb=cb)
    await asyncio.sleep(duration)
    await nc.drain()
    audit_tool_call("tool_event_bus.subscribe", {"subject": subject}, {"ok": True}, True, int((time.time() - _t) * 1000))
|
||||
@@ -0,0 +1,55 @@
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import requests
|
||||
from .common import _auth_headers
|
||||
|
||||
FARMOS_BASE_URL = os.getenv("FARMOS_BASE_URL", "http://localhost:8080")
|
||||
|
||||
|
||||
def get_asset(asset_id: str):
    """Fetch a single farmOS asset by id via the JSON:API endpoint."""
    started = time.time()
    response = requests.get(
        f"{FARMOS_BASE_URL}/jsonapi/asset/asset/{asset_id}",
        headers=_auth_headers(),
        timeout=20,
    )
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_farmos_read.get_asset", {"asset_id": asset_id}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
|
||||
|
||||
def search_assets(name_contains: str = "", limit: int = 10):
    """Search farmOS assets, optionally filtering by a name substring.

    BUG FIX: the audit call referenced an undefined `asset_id` (copied
    from get_asset), raising NameError after every successful search, and
    logged under the wrong tool name.
    """
    _t = time.time()
    params = {}
    if name_contains:
        params["filter[name][condition][path]"] = "name"
        params["filter[name][condition][operator]"] = "CONTAINS"
        params["filter[name][condition][value]"] = name_contains
    params["page[limit]"] = limit
    url = f"{FARMOS_BASE_URL}/jsonapi/asset/asset"
    r = requests.get(url, headers=_auth_headers(), params=params, timeout=20)
    r.raise_for_status()
    out = r.json()
    audit_tool_call("tool_farmos_read.search_assets", {"name_contains": name_contains, "limit": limit}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
|
||||
|
||||
def read_logs(log_type: str = "observation", limit: int = 10):
    """Read recent farmOS logs of *log_type*.

    BUG FIX: the audit call referenced an undefined `asset_id` (NameError
    on every successful call) and logged under get_asset's name.
    """
    _t = time.time()
    url = f"{FARMOS_BASE_URL}/jsonapi/log/{log_type}"
    params = {"page[limit]": limit}
    r = requests.get(url, headers=_auth_headers(), params=params, timeout=20)
    r.raise_for_status()
    out = r.json()
    audit_tool_call("tool_farmos_read.read_logs", {"log_type": log_type, "limit": limit}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
|
||||
|
||||
def read_inventory(limit: int = 10):
    """Read recent farmOS inventory logs.

    BUG FIX: the audit call referenced an undefined `asset_id` (NameError
    on every successful call) and logged under get_asset's name.
    """
    _t = time.time()
    url = f"{FARMOS_BASE_URL}/jsonapi/log/inventory"
    params = {"page[limit]": limit}
    r = requests.get(url, headers=_auth_headers(), params=params, timeout=20)
    r.raise_for_status()
    out = r.json()
    audit_tool_call("tool_farmos_read.read_inventory", {"limit": limit}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
@@ -0,0 +1,75 @@
|
||||
import os
|
||||
import json
|
||||
import hmac
|
||||
import time
|
||||
import uuid
|
||||
import hashlib
|
||||
import requests
|
||||
from .audit import audit_tool_call
|
||||
|
||||
INTEGRATION_BASE_URL = os.getenv("INTEGRATION_BASE_URL", "http://localhost:8800")
|
||||
AGX_HMAC_SECRET = os.getenv("AGX_HMAC_SECRET", "")
|
||||
|
||||
|
||||
def _sign(body: dict):
    """Serialize *body* and build HMAC auth headers for the integration API.

    Returns (headers, body_json).  The body is canonical JSON (compact,
    key-sorted) so the signature is reproducible.  With AGX_HMAC_SECRET
    unset the request goes out unsigned but still carries the JSON content
    type and the same canonical serialization — previously the unsigned
    path sent no headers at all and a differently-serialized body.
    """
    body_json = json.dumps(body, separators=(",", ":"), sort_keys=True)
    headers = {"Content-Type": "application/json"}
    trace_id = os.getenv("AGX_TRACE_ID", "")
    if trace_id:
        headers["X-AGX-TRACE-ID"] = trace_id
    if not AGX_HMAC_SECRET:
        return headers, body_json
    ts = str(int(time.time() * 1000))
    nonce = str(uuid.uuid4())
    payload = f"{ts}.{nonce}.{body_json}"
    sig = hmac.new(AGX_HMAC_SECRET.encode(), payload.encode(), hashlib.sha256).hexdigest()
    headers.update({
        "X-AGX-SIGNATURE": sig,
        "X-AGX-TIMESTAMP": ts,
        "X-AGX-NONCE": nonce,
    })
    return headers, body_json
|
||||
|
||||
|
||||
def write_observation(assetRef: dict, observation: dict):
    """POST an observation for *assetRef* to the integration service."""
    started = time.time()
    headers, body = _sign({"assetRef": assetRef, **observation})
    response = requests.post(f"{INTEGRATION_BASE_URL}/write/observation", data=body, headers=headers, timeout=20)
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_integration_write.write_observation", {"assetRef": assetRef}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
|
||||
|
||||
def write_event(assetRef: dict, event: dict):
    """POST an event for *assetRef* to the integration service."""
    started = time.time()
    headers, body = _sign({"assetRef": assetRef, **event})
    response = requests.post(f"{INTEGRATION_BASE_URL}/write/event", data=body, headers=headers, timeout=20)
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_integration_write.write_event", {"assetRef": assetRef}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
|
||||
|
||||
def write_tasklog(assetRef: dict, tasklog: dict):
    """POST a task log for *assetRef* to the integration service."""
    started = time.time()
    headers, body = _sign({"assetRef": assetRef, **tasklog})
    response = requests.post(f"{INTEGRATION_BASE_URL}/write/tasklog", data=body, headers=headers, timeout=20)
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_integration_write.write_tasklog", {"assetRef": assetRef}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
|
||||
|
||||
def write_inventory_movement(assetRef: dict, movement: dict):
    """POST an inventory movement for *assetRef* to the integration service."""
    started = time.time()
    headers, body = _sign({"assetRef": assetRef, **movement})
    response = requests.post(f"{INTEGRATION_BASE_URL}/write/inventory", data=body, headers=headers, timeout=20)
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_integration_write.write_inventory_movement", {"assetRef": assetRef}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
@@ -0,0 +1,16 @@
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import requests
|
||||
|
||||
LITEFARM_BASE_URL = os.getenv("LITEFARM_BASE_URL", "http://localhost:9000")
|
||||
|
||||
|
||||
def export_aggregates():
    """GET aggregated data from the LiteFarm API and audit the call."""
    started = time.time()
    response = requests.get(f"{LITEFARM_BASE_URL}/api/aggregates", timeout=20)
    response.raise_for_status()
    result = response.json()
    audit_tool_call("tool_litefarm_read.export_aggregates", {}, {"ok": True}, True, int((time.time() - started) * 1000))
    return result
|
||||
@@ -0,0 +1,202 @@
|
||||
import json
|
||||
import os
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
|
||||
from . import tool_integration_write
|
||||
|
||||
DATA_PATH = Path(os.getenv('AGX_OPERATION_PATH', '/opt/microdao-daarion/data/operations/operation_plans.jsonl'))
|
||||
|
||||
STATUS_FLOW = {
|
||||
'planned': ['scheduled', 'cancelled'],
|
||||
'scheduled': ['in_progress', 'cancelled'],
|
||||
'in_progress': ['done', 'cancelled'],
|
||||
'done': ['verified', 'closed'],
|
||||
'verified': ['closed'],
|
||||
'closed': [],
|
||||
'cancelled': []
|
||||
}
|
||||
|
||||
|
||||
def _now():
|
||||
return datetime.utcnow().isoformat() + 'Z'
|
||||
|
||||
|
||||
def _append_event(ev: dict):
    """Append one event to the operation-plan event log (JSONL)."""
    DATA_PATH.parent.mkdir(parents=True, exist_ok=True)
    line = json.dumps(ev, ensure_ascii=False)
    with DATA_PATH.open('a', encoding='utf-8') as fh:
        fh.write(line + '\n')
|
||||
|
||||
|
||||
def _load_events():
    """Read every event from the JSONL log; [] when the log doesn't exist."""
    if not DATA_PATH.exists():
        return []
    text = DATA_PATH.read_text(encoding='utf-8')
    return [json.loads(line) for line in text.split('\n') if line.strip()]
|
||||
|
||||
|
||||
def _project():
    """Rebuild the current state of every plan by replaying the event log.

    Event types: create_plan seeds the plan from its payload, update_plan
    merges a patch, set_status overwrites the status, record_fact appends
    to fact_events.  Mutating events for plan ids that were never created
    are silently ignored.
    """
    plans = {}
    for event in _load_events():
        plan_id = event.get('plan_id')
        kind = event['type']
        if kind == 'create_plan':
            plans[plan_id] = event['payload']
            continue
        if plan_id not in plans:
            continue
        plan = plans[plan_id]
        if kind == 'update_plan':
            plan.update(event['payload'])
        elif kind == 'set_status':
            plan['status'] = event['payload']['status']
        elif kind == 'record_fact':
            plan.setdefault('fact_events', []).append(event['payload'])
        else:
            continue
        plan['updated_ts'] = event['ts']
    return plans
|
||||
|
||||
|
||||
def _new_id(prefix: str):
|
||||
return f"{prefix}_{uuid.uuid4().hex[:8]}"
|
||||
|
||||
|
||||
def create_plan(plan_spec: dict, trace_id: str = '', source: str = 'telegram'):
    """Create a new operation plan and append a 'create_plan' event.

    Args:
        plan_spec: dict with optional 'scope' (dict) and 'tasks' (list of
            dicts); tasks without a 'task_id' get one generated.
        trace_id: correlation id propagated into the event log.
        source: origin channel of the request.

    Returns:
        The generated plan_id string.
    """
    # Single clock read so created_ts, updated_ts and the event ts agree
    # (the original called _now() repeatedly and could straddle a tick).
    now = _now()
    # utcnow() is deprecated; use an aware UTC clock for the date stamp.
    datestamp = datetime.now(timezone.utc).strftime('%Y%m%d')
    plan_id = f"opplan_{datestamp}_{uuid.uuid4().hex[:6]}"
    plan = {
        'plan_id': plan_id,
        'created_ts': now,
        'updated_ts': now,
        'trace_id': trace_id,
        'source': source,
        'status': 'planned',
        'scope': plan_spec.get('scope', {}),
        'tasks': [],
        'fact_events': []
    }
    for task in plan_spec.get('tasks', []):
        t = task.copy()
        t['task_id'] = t.get('task_id') or _new_id('task')
        plan['tasks'].append(t)

    _append_event({
        'ts': now,
        'type': 'create_plan',
        'plan_id': plan_id,
        'trace_id': trace_id,
        'payload': plan
    })
    return plan_id
|
||||
|
||||
|
||||
def list_plans(filters: dict | None = None):
    """Return all projected plans, optionally filtered by exact 'status'."""
    plans = list(_project().values())
    if filters:
        wanted = filters.get('status')
        if wanted:
            plans = [p for p in plans if p.get('status') == wanted]
    return plans
|
||||
|
||||
|
||||
def get_plan(plan_id: str):
    """Return the projected plan for plan_id, or None if unknown."""
    plans = _project()
    return plans.get(plan_id)
|
||||
|
||||
|
||||
def update_plan(plan_id: str, patch: dict, trace_id: str = ''):
    """Append an 'update_plan' event; the patch is merged at projection time.

    Note: existence of plan_id is not checked here — events for unknown ids
    are ignored by the projection.
    """
    event = {
        'ts': _now(),
        'type': 'update_plan',
        'plan_id': plan_id,
        'trace_id': trace_id,
        'payload': patch
    }
    _append_event(event)
    return True
|
||||
|
||||
|
||||
def set_status(plan_id: str, status: str, trace_id: str = ''):
    """Transition a plan's status, enforcing the STATUS_FLOW state machine.

    Raises:
        ValueError('plan_not_found'): unknown plan_id.
        ValueError('invalid_transition'): status unreachable from the current one.
    """
    plan = get_plan(plan_id)
    if not plan:
        raise ValueError('plan_not_found')
    allowed = STATUS_FLOW.get(plan.get('status'), [])
    if status not in allowed:
        raise ValueError('invalid_transition')
    event = {
        'ts': _now(),
        'type': 'set_status',
        'plan_id': plan_id,
        'trace_id': trace_id,
        'payload': {'status': status}
    }
    _append_event(event)
    return True
|
||||
|
||||
|
||||
def record_fact(plan_id: str, fact_event: dict, trace_id: str = ''):
    """Record an executed-work fact against a plan and mirror it to farmOS.

    The fact is appended to the event log regardless of whether the farmOS
    write succeeds; the outcome is recorded in fact['farmos_write'].

    Raises:
        ValueError('plan_not_found'): unknown plan_id.
    """
    plan = get_plan(plan_id)
    if not plan:
        raise ValueError('plan_not_found')
    fact = fact_event.copy()
    fact['fact_id'] = fact.get('fact_id') or _new_id('fact')
    fact.setdefault('farmos_write', {'status': 'pending', 'ref': ''})

    # Write to farmOS via the integration service (single-writer). Deliberate
    # best-effort: failures are recorded on the fact, never raised.
    try:
        tool_integration_write.write_tasklog(
            {'source': 'operation_plan', 'deviceId': fact.get('field_id', '')},
            {
                'task': fact.get('operation_id', ''),
                'status': 'done',
                # BUG FIX: datetime.utcnow().timestamp() interprets the naive
                # datetime as *local* time, yielding a wrong epoch-ms value on
                # non-UTC hosts; use an aware UTC datetime instead.
                'ts': int(datetime.now(timezone.utc).timestamp() * 1000),
                'notes': json.dumps(fact.get('fact', {}), ensure_ascii=False)
            }
        )
        fact['farmos_write'] = {'status': 'ok', 'ref': ''}
    except Exception:
        fact['farmos_write'] = {'status': 'failed', 'ref': ''}

    _append_event({
        'ts': _now(),
        'type': 'record_fact',
        'plan_id': plan_id,
        'trace_id': trace_id,
        'payload': fact
    })
    return True
|
||||
|
||||
|
||||
def plan_dashboard(date_range: dict | None = None, filters: dict | None = None):
    """Summarize plans: status counts plus overdue and critical tasks.

    Args:
        date_range: echoed back in the result; not used for filtering yet.
        filters: passed through to list_plans (supports 'status').

    Returns:
        dict with 'counts', 'critical_tasks' (a task may appear twice, once
        with reason 'overdue' and once with 'critical') and a placeholder
        'plan_vs_fact' list.
    """
    plans = list_plans(filters)
    counts = {'planned': 0, 'in_progress': 0, 'done': 0, 'overdue': 0}
    critical_tasks = []
    # Aware UTC clock (utcnow() is deprecated); ISO dates compare lexically.
    today = datetime.now(timezone.utc).date().isoformat()

    for p in plans:
        status = p.get('status')
        if status in counts:
            counts[status] += 1
        # BUG FIX: the original did scope.get('field_ids', [''])[0], which
        # raises IndexError when the plan carries an explicit empty list.
        field_ids = p.get('scope', {}).get('field_ids') or ['']
        field_id = field_ids[0]
        for t in p.get('tasks', []):
            planned_date = t.get('planned_date')
            if planned_date and planned_date < today and status not in ('done', 'closed', 'verified'):
                counts['overdue'] += 1
                critical_tasks.append({
                    'field_id': field_id,
                    'operation_id': t.get('operation_id'),
                    'planned_date': planned_date,
                    'reason': 'overdue'
                })
            if t.get('priority') == 'critical':
                critical_tasks.append({
                    'field_id': field_id,
                    'operation_id': t.get('operation_id'),
                    'planned_date': planned_date,
                    'reason': 'critical'
                })

    return {
        'status': 'ok',
        'date_range': date_range or {},
        'counts': counts,
        'critical_tasks': critical_tasks,
        'plan_vs_fact': []
    }
|
||||
104
packages/agromatrix-tools/agromatrix_tools/tool_spreadsheet.py
Normal file
104
packages/agromatrix-tools/agromatrix_tools/tool_spreadsheet.py
Normal file
@@ -0,0 +1,104 @@
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import subprocess
|
||||
from typing import List, Any
|
||||
|
||||
import pandas as pd
|
||||
from openpyxl import load_workbook, Workbook
|
||||
from openpyxl.utils import range_boundaries
|
||||
from openpyxl.worksheet.table import Table, TableStyleInfo
|
||||
|
||||
|
||||
def open_workbook(path: str):
    """List the sheet names of an existing workbook and audit the call.

    FIX: read sheetnames before closing the workbook (the original read the
    attribute after close()) and guarantee close() on error.
    """
    _t = time.time()
    wb = load_workbook(path)
    try:
        sheets = list(wb.sheetnames)
    finally:
        wb.close()
    out = {"path": path, "sheets": sheets}
    audit_tool_call("tool_spreadsheet.open_workbook", {"path": path}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
|
||||
|
||||
def read_range(path: str, sheet: str, a1_range: str):
    """Return cell values for an A1-style range as a list of row lists.

    Loads with data_only=True so formulas yield their cached values.
    FIX: close the workbook even when the sheet/range lookup raises
    (the original leaked the open workbook on error).
    """
    wb = load_workbook(path, data_only=True)
    try:
        ws = wb[sheet]
        min_col, min_row, max_col, max_row = range_boundaries(a1_range)
        data = [
            list(row)
            for row in ws.iter_rows(min_row=min_row, max_row=max_row,
                                    min_col=min_col, max_col=max_col,
                                    values_only=True)
        ]
    finally:
        wb.close()
    return {"sheet": sheet, "range": a1_range, "values": data}
|
||||
|
||||
|
||||
def write_range(path: str, sheet: str, start_cell: str, values: List[List[Any]], style: dict | None = None):
    """Write a 2-D block of values into a sheet starting at start_cell.

    Args:
        path: workbook file, modified in place.
        sheet: target sheet name.
        start_cell: A1 reference of the block's top-left cell.
        values: rows of cell values.
        style: accepted for interface compatibility but currently unused.

    FIX: close the workbook even on error (the original leaked it).
    """
    wb = load_workbook(path)
    try:
        ws = wb[sheet]
        anchor = ws[start_cell]
        base_row, base_col = anchor.row, anchor.column
        for r_idx, row in enumerate(values):
            for c_idx, val in enumerate(row):
                ws.cell(row=base_row + r_idx, column=base_col + c_idx, value=val)
        wb.save(path)
    finally:
        wb.close()
    return {"status": "ok"}
|
||||
|
||||
|
||||
def add_sheet(path: str, name: str):
    """Add a new empty sheet; returns {'status': 'exists'} if already present.

    FIX: close the workbook via finally so errors during create/save do not
    leak the open workbook.
    """
    wb = load_workbook(path)
    try:
        if name in wb.sheetnames:
            return {"status": "exists"}
        wb.create_sheet(title=name)
        wb.save(path)
    finally:
        wb.close()
    return {"status": "ok"}
|
||||
|
||||
|
||||
def create_workbook(path: str, template_spec_json: dict):
    """Create a new workbook from a template spec.

    template_spec_json: {"sheets": [{"name": str, "data": [[...], ...]}, ...]}

    BUG FIX: the original always removed the default sheet first, so an
    empty 'sheets' spec produced a zero-sheet workbook that openpyxl cannot
    save. The default sheet is now removed only after at least one sheet
    has been created.
    """
    wb = Workbook()
    default_sheet = wb.active
    sheet_specs = template_spec_json.get("sheets", [])
    for sheet_spec in sheet_specs:
        ws = wb.create_sheet(title=sheet_spec.get("name", "Sheet"))
        for row in sheet_spec.get("data", []):
            ws.append(row)
    if sheet_specs:
        wb.remove(default_sheet)
    wb.save(path)
    wb.close()
    return {"status": "ok", "path": path}
|
||||
|
||||
|
||||
def apply_table(path: str, sheet: str, a1_range: str, table_name: str):
    """Register a1_range as a named Excel table with a standard style.

    FIX: close the workbook even when the sheet lookup or save raises
    (the original leaked it on error).
    """
    wb = load_workbook(path)
    try:
        ws = wb[sheet]
        table = Table(displayName=table_name, ref=a1_range)
        table.tableStyleInfo = TableStyleInfo(name="TableStyleMedium9", showRowStripes=True)
        ws.add_table(table)
        wb.save(path)
    finally:
        wb.close()
    return {"status": "ok"}
|
||||
|
||||
|
||||
def validate(path: str):
    """Return a list of error strings; empty list means the workbook opens cleanly."""
    try:
        workbook = load_workbook(path)
        workbook.close()
        return []
    except Exception as exc:
        return [str(exc)]
|
||||
|
||||
|
||||
def save_as(path: str, out_path: str):
    """Copy a workbook by loading it and saving it under a new path.

    FIX: close the workbook via finally so a failed save does not leak it.
    """
    wb = load_workbook(path)
    try:
        wb.save(out_path)
    finally:
        wb.close()
    return {"status": "ok", "out_path": out_path}
|
||||
|
||||
|
||||
def convert_to_pdf(path: str, out_pdf_path: str):
    """Convert a workbook to PDF via headless LibreOffice.

    BUG FIX: soffice always names its output '<input stem>.pdf' inside
    --outdir; the original returned out_pdf_path without ensuring a file by
    that name exists. The produced file is now moved to out_pdf_path, and an
    empty dirname defaults to '.' so relative output paths work.

    Returns {"status": "ok", ...} on success, {"status": "error", ...} on
    any failure (missing soffice, non-zero exit, move failure).
    """
    out_dir = os.path.dirname(out_pdf_path) or "."
    cmd = ["soffice", "--headless", "--convert-to", "pdf", "--outdir", out_dir, path]
    try:
        subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stem = os.path.splitext(os.path.basename(path))[0]
        produced = os.path.join(out_dir, stem + ".pdf")
        if os.path.abspath(produced) != os.path.abspath(out_pdf_path):
            os.replace(produced, out_pdf_path)
        return {"status": "ok", "out_pdf_path": out_pdf_path}
    except Exception as e:
        return {"status": "error", "error": str(e)}
|
||||
@@ -0,0 +1,21 @@
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import subprocess
|
||||
|
||||
TANIA_BASE_URL = os.getenv("TANIA_BASE_URL", "http://localhost:3000")
|
||||
|
||||
|
||||
def check():
    """Report the configured Tania base URL and audit the probe."""
    started = time.time()
    result = {"url": TANIA_BASE_URL}
    elapsed_ms = int((time.time() - started) * 1000)
    audit_tool_call("tool_tania_control.check", {}, {"ok": True}, True, elapsed_ms)
    return result
|
||||
|
||||
|
||||
def start():
    """Placeholder: Tania lifecycle is managed externally, nothing to start."""
    return {"status": "noop"}
|
||||
|
||||
|
||||
def stop():
    """Placeholder: Tania lifecycle is managed externally, nothing to stop."""
    return {"status": "noop"}
|
||||
@@ -0,0 +1,35 @@
|
||||
import os
|
||||
import time
|
||||
from .audit import audit_tool_call
|
||||
import requests
|
||||
|
||||
THINGSBOARD_BASE_URL = os.getenv("THINGSBOARD_BASE_URL", "http://localhost:8081")
|
||||
THINGSBOARD_TOKEN = os.getenv("THINGSBOARD_TOKEN", "")
|
||||
|
||||
|
||||
def _headers():
    """Build auth headers; empty dict when no JWT token is configured."""
    token = THINGSBOARD_TOKEN
    return {"X-Authorization": f"Bearer {token}"} if token else {}
|
||||
|
||||
|
||||
def get_latest_telemetry(device_id: str, metric: str, window: int = 3600):
    """Fetch timeseries values for one metric over the last `window` seconds.

    BUG FIX: the original sent empty startTs/endTs strings and ignored the
    `window` parameter entirely; the query window is now
    [now - window, now] in epoch milliseconds as the ThingsBoard
    timeseries API expects.

    Uses deviceId with an admin JWT (X-Authorization). If using a device
    token, adapt externally.

    Raises requests.HTTPError on a non-2xx response.
    """
    _t = time.time()
    end_ts = int(time.time() * 1000)
    start_ts = end_ts - int(window) * 1000
    url = f"{THINGSBOARD_BASE_URL}/api/plugins/telemetry/DEVICE/{device_id}/values/timeseries"
    params = {"keys": metric, "startTs": start_ts, "endTs": end_ts}
    r = requests.get(url, headers=_headers(), params=params, timeout=20)
    r.raise_for_status()
    out = r.json()
    audit_tool_call("tool_thingsboard_read.get_latest_telemetry", {"device_id": device_id, "metric": metric}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
|
||||
|
||||
def get_device_status(device_id: str):
    """Fetch device info/status from ThingsBoard.

    BUG FIX: the audit call was copy-pasted from get_latest_telemetry — it
    logged the wrong tool name and referenced an undefined `metric`
    variable, raising NameError on every call.

    Raises requests.HTTPError on a non-2xx response.
    """
    _t = time.time()
    url = f"{THINGSBOARD_BASE_URL}/api/device/info/{device_id}"
    r = requests.get(url, headers=_headers(), timeout=20)
    r.raise_for_status()
    out = r.json()
    audit_tool_call("tool_thingsboard_read.get_device_status", {"device_id": device_id}, {"ok": True}, True, int((time.time()-_t)*1000))
    return out
|
||||
8
packages/agromatrix-tools/requirements.txt
Normal file
8
packages/agromatrix-tools/requirements.txt
Normal file
@@ -0,0 +1,8 @@
|
||||
requests>=2.31.0
|
||||
pandas>=2.2.2
|
||||
openpyxl>=3.1.5
|
||||
XlsxWriter>=3.2.0
|
||||
nats-py>=2.7.2
|
||||
|
||||
PyYAML>=6.0.2
|
||||
rapidfuzz>=3.9.6
|
||||
Reference in New Issue
Block a user