diff --git a/config/nodes_registry.yml b/config/nodes_registry.yml new file mode 100644 index 00000000..87efcc0f --- /dev/null +++ b/config/nodes_registry.yml @@ -0,0 +1,64 @@ +defaults: + health_timeout_sec: 10 + tools_timeout_sec: 30 + # Per-node timeout defaults (overridable per-node) + gateway_timeout_ms: 2500 # ms for gateway health/agent fetch + apply_timeout_ms: 10000 # ms for apply POST + get_retry: 1 # max retries for GET (health check) + post_retry: 0 # no retry for mutating calls +nodes: + NODA1: + label: Production (NODA1) + node_role: prod # prod = always-on, higher timeouts + gateway_timeout_ms: 2500 + apply_timeout_ms: 10000 + router_url: http://144.76.224.179:9102 + gateway_url: http://144.76.224.179:9300 + monitor_url: http://144.76.224.179:9102 + supervisor_url: '' + ssh: + host: 144.76.224.179 + ipv6: 2a01:4f8:201:2a6::2 + port: 22 + user: root + auth: + password_env: NODES_NODA1_SSH_PASSWORD + host_keys: + - type: rsa + bits: 3072 + sha256: OzbVMM7CC4SatdE2CSoxh5qgJdCyYO22MLjchXXBIro + - type: ecdsa + bits: 256 + sha256: YPQUigtDm3HiEp4MYYeREE+M3ig/2CrZXy2ozr4OWQw + - type: ed25519 + bits: 256 + sha256: 79LG0tKQ1B1DsdVZ/BhLYSX2v08eCWqqWihHtn+Y8FU + NODA2: + label: Control Plane (NODA2 · MacBook) + node_role: dev # dev = optional, short timeout, canary-default + gateway_timeout_ms: 1000 # fast timeout — dev laptop may sleep/NAT + apply_timeout_ms: 5000 + get_retry: 1 + post_retry: 0 + router_url: http://127.0.0.1:9102 + gateway_url: http://127.0.0.1:9300 + monitor_url: http://127.0.0.1:9102 + supervisor_url: http://127.0.0.1:8084 + NODA3: + label: AI/ML Experiments (NODA3) + node_role: dev + gateway_timeout_ms: 800 + router_url: '' + gateway_url: '' + monitor_url: '' + supervisor_url: '' + enabled: false + NODA4: + label: Reserve Node (NODA4) + node_role: dev + gateway_timeout_ms: 1500 + router_url: http://10.0.0.44:9102 + gateway_url: http://10.0.0.44:9300 + monitor_url: http://10.0.0.44:9102 + supervisor_url: '' + enabled: false diff --git 
a/config/rbac_tools_matrix.yml b/config/rbac_tools_matrix.yml new file mode 100644 index 00000000..5ee2a233 --- /dev/null +++ b/config/rbac_tools_matrix.yml @@ -0,0 +1,507 @@ +# RBAC Tools Matrix +# Maps tool → action → entitlements required +# Enforced by tool_governance.py in gateway dispatch +# +# Entitlement format: tools.<domain>.<action> +# Agents/users must have ALL listed entitlements to perform an action. + +tools: + + repo_tool: + actions: + tree: + entitlements: ["tools.repo.read"] + read: + entitlements: ["tools.repo.read"] + search: + entitlements: ["tools.repo.read"] + metadata: + entitlements: ["tools.repo.read"] + + kb_tool: + actions: + search: + entitlements: ["tools.kb.read"] + snippets: + entitlements: ["tools.kb.read"] + open: + entitlements: ["tools.kb.read"] + sources: + entitlements: ["tools.kb.read"] + + oncall_tool: + actions: + services_list: + entitlements: ["tools.oncall.read"] + service_health: + entitlements: ["tools.oncall.read"] + service_status: + entitlements: ["tools.oncall.read"] + runbook_search: + entitlements: ["tools.oncall.read"] + runbook_read: + entitlements: ["tools.oncall.read"] + deployments_recent: + entitlements: ["tools.oncall.read"] + incident_list: + entitlements: ["tools.oncall.read"] + incident_get: + entitlements: ["tools.oncall.read"] + incident_create: + entitlements: ["tools.oncall.incident_write"] + incident_close: + entitlements: ["tools.oncall.incident_write"] + incident_append_event: + entitlements: ["tools.oncall.incident_write"] + incident_attach_artifact: + entitlements: ["tools.oncall.incident_write"] + incident_followups_summary: + entitlements: ["tools.oncall.read"] + alert_to_incident: + entitlements: ["tools.oncall.incident_write", "tools.alerts.read", "tools.alerts.ack"] + + incident_escalation_tool: + actions: + evaluate: + entitlements: ["tools.oncall.incident_write"] + auto_resolve_candidates: + entitlements: ["tools.oncall.incident_write"] + + risk_engine_tool: + actions: + service: + entitlements: 
["tools.risk.read"] + dashboard: + entitlements: ["tools.risk.read"] + policy: + entitlements: ["tools.risk.read"] + + risk_history_tool: + actions: + snapshot: + entitlements: ["tools.risk.write"] + cleanup: + entitlements: ["tools.risk.write"] + series: + entitlements: ["tools.risk.read"] + digest: + entitlements: ["tools.risk.write"] + + backlog_tool: + actions: + list: + entitlements: ["tools.backlog.read"] + get: + entitlements: ["tools.backlog.read"] + dashboard: + entitlements: ["tools.backlog.read"] + create: + entitlements: ["tools.backlog.write"] + upsert: + entitlements: ["tools.backlog.write"] + set_status: + entitlements: ["tools.backlog.write"] + add_comment: + entitlements: ["tools.backlog.write"] + close: + entitlements: ["tools.backlog.write"] + auto_generate_weekly: + entitlements: ["tools.backlog.admin"] + cleanup: + entitlements: ["tools.backlog.admin"] + + architecture_pressure_tool: + actions: + service: + entitlements: ["tools.pressure.read"] + dashboard: + entitlements: ["tools.pressure.read"] + digest: + entitlements: ["tools.pressure.write"] + + incident_intelligence_tool: + actions: + correlate: + entitlements: ["tools.oncall.read"] + recurrence: + entitlements: ["tools.oncall.read"] + buckets: + entitlements: ["tools.oncall.read"] + weekly_digest: + entitlements: ["tools.oncall.incident_write"] # writes FS artifacts + autofollowups + + alert_ingest_tool: + actions: + ingest: + entitlements: ["tools.alerts.ingest"] + list: + entitlements: ["tools.alerts.read"] + get: + entitlements: ["tools.alerts.read"] + ack: + entitlements: ["tools.alerts.ack"] + claim: + entitlements: ["tools.alerts.claim"] + fail: + entitlements: ["tools.alerts.ack"] + + observability_tool: + actions: + metrics_query: + entitlements: ["tools.observability.read"] + metrics_range: + entitlements: ["tools.observability.read"] + logs_query: + entitlements: ["tools.observability.read"] + traces_query: + entitlements: ["tools.observability.traces"] + service_overview: + 
entitlements: ["tools.observability.read"] + slo_snapshot: + entitlements: ["tools.observability.read"] + + monitor_tool: + actions: + status: + entitlements: ["tools.monitor.read"] + + pr_reviewer_tool: + actions: + review: + entitlements: ["tools.pr_review.use"] + gate: + entitlements: ["tools.pr_review.gate"] + + contract_tool: + actions: + lint_openapi: + entitlements: ["tools.contract.use"] + diff_openapi: + entitlements: ["tools.contract.use"] + generate_client_stub: + entitlements: ["tools.contract.use"] + gate: + entitlements: ["tools.contract.gate"] + + config_linter_tool: + actions: + lint: + entitlements: ["tools.config_lint.use"] + gate: + entitlements: ["tools.config_lint.gate"] + + threatmodel_tool: + actions: + analyze_service: + entitlements: ["tools.threatmodel.use"] + analyze_diff: + entitlements: ["tools.threatmodel.use"] + generate_checklist: + entitlements: ["tools.threatmodel.use"] + gate: + entitlements: ["tools.threatmodel.gate"] + + job_orchestrator_tool: + actions: + list_tasks: + entitlements: ["tools.jobs.use"] + start_task: + entitlements: ["tools.jobs.use"] + get_job: + entitlements: ["tools.jobs.use"] + cancel_job: + entitlements: ["tools.jobs.cancel"] + + memory_search: + actions: + _default: + entitlements: ["tools.memory.read"] + + graph_query: + actions: + _default: + entitlements: ["tools.memory.read"] + + remember_fact: + actions: + _default: + entitlements: ["tools.memory.write"] + + web_search: + actions: + _default: + entitlements: ["tools.web.read"] + + web_extract: + actions: + _default: + entitlements: ["tools.web.read"] + + crawl4ai_scrape: + actions: + _default: + entitlements: ["tools.web.read"] + + image_generate: + actions: + _default: + entitlements: ["tools.media.generate"] + + comfy_generate_image: + actions: + _default: + entitlements: ["tools.media.generate"] + + comfy_generate_video: + actions: + _default: + entitlements: ["tools.media.generate"] + + tts_speak: + actions: + _default: + entitlements: 
["tools.media.generate"] + + presentation_create: + actions: + _default: + entitlements: ["tools.docs.create"] + + presentation_status: + actions: + _default: + entitlements: ["tools.docs.create"] + + presentation_download: + actions: + _default: + entitlements: ["tools.docs.create"] + + file_tool: + actions: + _default: + entitlements: ["tools.docs.create"] + + market_data: + actions: + _default: + entitlements: ["tools.market.read"] + + data_governance_tool: + actions: + digest_audit: + entitlements: ["tools.data_gov.read"] + scan_repo: + entitlements: ["tools.data_gov.read"] + scan_audit: + entitlements: ["tools.data_gov.read"] + retention_check: + entitlements: ["tools.data_gov.read"] + policy: + entitlements: ["tools.data_gov.read"] + gate: + entitlements: ["tools.data_gov.gate"] + + cost_analyzer_tool: + actions: + digest: + entitlements: ["tools.cost.read"] + report: + entitlements: ["tools.cost.read"] + top: + entitlements: ["tools.cost.read"] + anomalies: + entitlements: ["tools.cost.read"] + weights: + entitlements: ["tools.cost.read"] + gate: + entitlements: ["tools.cost.gate"] + + dependency_scanner_tool: + actions: + scan: + entitlements: ["tools.deps.read"] + gate: + entitlements: ["tools.deps.gate"] + + drift_analyzer_tool: + actions: + analyze: + entitlements: ["tools.drift.read"] + gate: + entitlements: ["tools.drift.gate"] + + calendar_tool: + actions: + connect: + entitlements: ["tools.calendar.use"] + list_calendars: + entitlements: ["tools.calendar.use"] + list_events: + entitlements: ["tools.calendar.use"] + get_event: + entitlements: ["tools.calendar.use"] + create_event: + entitlements: ["tools.calendar.use"] + update_event: + entitlements: ["tools.calendar.use"] + delete_event: + entitlements: ["tools.calendar.use"] + set_reminder: + entitlements: ["tools.calendar.use"] + + agent_email_tool: + actions: + create_inbox: + entitlements: ["tools.email.use"] + list_inboxes: + entitlements: ["tools.email.use"] + delete_inbox: + entitlements: 
["tools.email.use"] + send: + entitlements: ["tools.email.use"] + receive: + entitlements: ["tools.email.use"] + analyze_email: + entitlements: ["tools.email.use"] + + browser_tool: + actions: + _default: + entitlements: ["tools.browser.use"] + + safe_code_executor_tool: + actions: + _default: + entitlements: ["tools.exec.safe"] + + secure_vault_tool: + actions: + _default: + entitlements: ["tools.vault.manage"] + +# ─── Role → Entitlements ───────────────────────────────────────────────────── +# Lists which entitlements each role has. +# Used by tool_governance.py to resolve agent role → entitlement set. + +role_entitlements: + agent_default: + - tools.repo.read + - tools.kb.read + - tools.oncall.read + - tools.observability.read + - tools.memory.read + - tools.memory.write + - tools.web.read + - tools.media.generate + - tools.docs.create + - tools.jobs.use + + agent_cto: + - tools.repo.read + - tools.kb.read + - tools.oncall.read + - tools.oncall.incident_write + - tools.alerts.ingest + - tools.alerts.read + - tools.alerts.ack + - tools.alerts.claim + - tools.observability.read + - tools.observability.traces + - tools.monitor.read + - tools.memory.read + - tools.memory.write + - tools.web.read + - tools.media.generate + - tools.docs.create + - tools.pr_review.use + - tools.pr_review.gate + - tools.contract.use + - tools.contract.gate + - tools.config_lint.use + - tools.config_lint.gate + - tools.threatmodel.use + - tools.threatmodel.gate + - tools.jobs.use + - tools.jobs.cancel + - tools.jobs.run.smoke + - tools.jobs.run.drift + - tools.jobs.run.backup + - tools.jobs.run.migrate + - tools.jobs.run.deploy + - tools.jobs.run.ops + - tools.deps.read + - tools.deps.gate + - tools.cost.read + - tools.cost.gate + - tools.data_gov.read + - tools.data_gov.gate + - tools.drift.read + - tools.drift.gate + - tools.risk.read + - tools.risk.write + - tools.pressure.read + - tools.pressure.write + - tools.backlog.read + - tools.backlog.write + - tools.backlog.admin + - 
tools.calendar.use + - tools.email.use + - tools.browser.use + - tools.exec.safe + - tools.vault.manage + + agent_oncall: + - tools.repo.read + - tools.kb.read + - tools.oncall.read + - tools.oncall.incident_write + - tools.alerts.read + - tools.alerts.ack + - tools.alerts.claim + - tools.observability.read + - tools.monitor.read + - tools.memory.read + - tools.web.read + - tools.jobs.use + - tools.jobs.run.smoke + - tools.jobs.run.drift + - tools.jobs.run.ops + - tools.deps.read + - tools.drift.read + - tools.cost.read + - tools.data_gov.read + - tools.risk.read + - tools.risk.write + - tools.pressure.read + - tools.backlog.read + - tools.backlog.write + + agent_media: + - tools.repo.read + - tools.kb.read + - tools.oncall.read + - tools.observability.read + - tools.memory.read + - tools.memory.write + - tools.web.read + - tools.media.generate + - tools.docs.create + - tools.jobs.use + + agent_monitor: + # Read-only: observability, health, KB — no incident write, no jobs + # Can INGEST alerts (detect → alert), but NOT create incidents + - tools.oncall.read + - tools.observability.read + - tools.monitor.read + - tools.kb.read + - tools.alerts.ingest + - tools.risk.read + + agent_interface: + # Minimal: KB + incident list/get + alert list/get + backlog read (read-only) + - tools.kb.read + - tools.oncall.read + - tools.alerts.read + - tools.backlog.read diff --git a/config/tool_limits.yml b/config/tool_limits.yml new file mode 100644 index 00000000..5c5378a5 --- /dev/null +++ b/config/tool_limits.yml @@ -0,0 +1,339 @@ +# Tool Safety Limits Configuration +# Controls per-tool: timeout, input/output sizes, rate limits, concurrency. +# Applied by tool_governance.py middleware before dispatch. 
+ +# ─── Global Defaults ───────────────────────────────────────────────────────── +defaults: + timeout_ms: 30000 # 30s + max_chars_in: 200000 # 200KB input + max_bytes_out: 524288 # 512KB output + rate_limit_rpm: 60 # 60 req/min per agent + concurrency: 5 # max parallel calls per agent + +# ─── Per-Tool Overrides ─────────────────────────────────────────────────────── +tools: + + repo_tool: + timeout_ms: 10000 + max_chars_in: 10000 # path + params only, not file content input + max_bytes_out: 524288 # 512KB (file read result) + rate_limit_rpm: 120 + + kb_tool: + timeout_ms: 15000 + max_chars_in: 5000 + max_bytes_out: 262144 # 256KB + rate_limit_rpm: 60 + + oncall_tool: + timeout_ms: 10000 + max_chars_in: 10000 + max_bytes_out: 131072 # 128KB + rate_limit_rpm: 30 + concurrency: 3 + actions: + incident_followups_summary: + timeout_ms: 10000 + rate_limit_rpm: 10 + + incident_escalation_tool: + timeout_ms: 30000 + max_bytes_out: 524288 + rate_limit_rpm: 10 + + risk_engine_tool: + actions: + service: + timeout_seconds: 10 + rpm: 20 + dashboard: + timeout_seconds: 20 + rpm: 5 + policy: + timeout_seconds: 2 + rpm: 60 + + risk_history_tool: + actions: + snapshot: + timeout_seconds: 60 + rpm: 1 + cleanup: + timeout_seconds: 30 + rpm: 2 + series: + timeout_seconds: 5 + rpm: 30 + digest: + timeout_seconds: 30 + rpm: 2 + + incident_intelligence_tool: + timeout_ms: 30000 + max_bytes_out: 524288 # 512KB (digests can be large) + rate_limit_rpm: 5 + actions: + correlate: + timeout_ms: 10000 + rate_limit_rpm: 10 + recurrence: + timeout_ms: 15000 + rate_limit_rpm: 5 + buckets: + timeout_ms: 15000 + rate_limit_rpm: 5 + weekly_digest: + timeout_ms: 30000 # longer: writes artifacts + autofollowups + rate_limit_rpm: 2 + + alert_ingest_tool: + timeout_ms: 5000 + max_chars_in: 32768 # 32KB — alert payload cap + max_bytes_out: 65536 # 64KB + rate_limit_rpm: 60 # monitor can send up to 60 alerts/min + concurrency: 5 + actions: + ingest: + rate_limit_rpm: 60 # monitor rate: 1/s + list: + 
rate_limit_rpm: 30 + timeout_ms: 5000 + ack: + rate_limit_rpm: 20 + claim: + rate_limit_rpm: 10 + timeout_ms: 5000 + fail: + rate_limit_rpm: 20 + + observability_tool: + timeout_ms: 15000 + max_chars_in: 5000 + max_bytes_out: 524288 # 512KB (metrics can be large) + rate_limit_rpm: 30 + concurrency: 3 + actions: + slo_snapshot: + timeout_ms: 10000 + rate_limit_rpm: 30 + + pr_reviewer_tool: + timeout_ms: 60000 # 60s (diff analysis can be slow) + max_chars_in: 409600 # 400KB (diff text) + max_bytes_out: 262144 # 256KB + rate_limit_rpm: 10 + concurrency: 2 + + contract_tool: + timeout_ms: 30000 + max_chars_in: 819200 # 800KB (openapi specs) + max_bytes_out: 262144 + rate_limit_rpm: 20 + + config_linter_tool: + timeout_ms: 30000 + max_chars_in: 409600 # 400KB + max_bytes_out: 131072 + rate_limit_rpm: 20 + + threatmodel_tool: + timeout_ms: 30000 + max_chars_in: 614400 # 600KB + max_bytes_out: 262144 + rate_limit_rpm: 10 + + job_orchestrator_tool: + timeout_ms: 600000 # 10 min (job execution) + max_chars_in: 10000 + max_bytes_out: 131072 + rate_limit_rpm: 10 + concurrency: 2 + + memory_search: + timeout_ms: 5000 + max_chars_in: 2000 + max_bytes_out: 65536 + rate_limit_rpm: 120 + + graph_query: + timeout_ms: 5000 + max_chars_in: 2000 + max_bytes_out: 65536 + rate_limit_rpm: 60 + + web_search: + timeout_ms: 15000 + max_chars_in: 500 + max_bytes_out: 131072 + rate_limit_rpm: 30 + + web_extract: + timeout_ms: 30000 + max_chars_in: 2000 + max_bytes_out: 524288 + rate_limit_rpm: 20 + + crawl4ai_scrape: + timeout_ms: 60000 + max_chars_in: 2000 + max_bytes_out: 1048576 # 1MB + rate_limit_rpm: 10 + + image_generate: + timeout_ms: 60000 + max_chars_in: 5000 + max_bytes_out: 5242880 # 5MB (base64 image) + rate_limit_rpm: 5 + concurrency: 1 + + comfy_generate_image: + timeout_ms: 120000 + max_chars_in: 5000 + max_bytes_out: 10485760 # 10MB + rate_limit_rpm: 3 + concurrency: 1 + + comfy_generate_video: + timeout_ms: 300000 + max_chars_in: 5000 + max_bytes_out: 52428800 # 50MB + 
rate_limit_rpm: 1 + concurrency: 1 + + tts_speak: + timeout_ms: 30000 + max_chars_in: 5000 + max_bytes_out: 5242880 + rate_limit_rpm: 10 + + presentation_create: + timeout_ms: 120000 + max_chars_in: 100000 + max_bytes_out: 20971520 # 20MB + rate_limit_rpm: 5 + + file_tool: + timeout_ms: 60000 + max_chars_in: 524288 + max_bytes_out: 20971520 + rate_limit_rpm: 10 + + data_governance_tool: + timeout_ms: 30000 # 30s (file I/O + regex scanning) + max_chars_in: 3000 # params only + max_bytes_out: 1048576 # 1MB (findings list can be verbose) + rate_limit_rpm: 5 # read-heavy, limit frequency + concurrency: 1 # serial: filesystem-bound + actions: + digest_audit: + timeout_ms: 20000 + rate_limit_rpm: 5 + + cost_analyzer_tool: + timeout_ms: 30000 # raised: Postgres queries may take longer + max_chars_in: 2000 # params only + max_bytes_out: 1048576 # 1MB (report may include many breakdowns) + rate_limit_rpm: 10 # light reads, allow more + concurrency: 2 + actions: + digest: + timeout_ms: 20000 + rate_limit_rpm: 5 + + dependency_scanner_tool: + timeout_ms: 45000 # 45s (online mode may need more time) + max_chars_in: 3000 # params only + max_bytes_out: 1048576 # 1MB (vuln list can be verbose) + rate_limit_rpm: 5 # expensive scan + concurrency: 1 # serial: avoids hammering OSV API + + drift_analyzer_tool: + timeout_ms: 30000 # 30s (reads many files) + max_chars_in: 5000 # params only, no large input + max_bytes_out: 524288 # 512KB (findings can be verbose) + rate_limit_rpm: 5 # expensive — limit calls + concurrency: 1 + + market_data: + timeout_ms: 10000 + max_chars_in: 1000 + max_bytes_out: 65536 + rate_limit_rpm: 60 + + architecture_pressure_tool: + service: + timeout_ms: 10000 + rate_limit_rpm: 30 + dashboard: + timeout_ms: 20000 + rate_limit_rpm: 10 + digest: + timeout_ms: 30000 + rate_limit_rpm: 2 + + backlog_tool: + list: + timeout_ms: 10000 + rate_limit_rpm: 30 + get: + timeout_ms: 5000 + rate_limit_rpm: 60 + dashboard: + timeout_ms: 10000 + rate_limit_rpm: 20 + create: + 
timeout_ms: 20000 + rate_limit_rpm: 5 + upsert: + timeout_ms: 20000 + rate_limit_rpm: 5 + set_status: + timeout_ms: 10000 + rate_limit_rpm: 10 + add_comment: + timeout_ms: 5000 + rate_limit_rpm: 20 + close: + timeout_ms: 10000 + rate_limit_rpm: 5 + auto_generate_weekly: + timeout_ms: 30000 + rate_limit_rpm: 2 + cleanup: + timeout_ms: 30000 + rate_limit_rpm: 2 + + calendar_tool: + timeout_ms: 30000 + max_chars_in: 12000 + max_bytes_out: 262144 + rate_limit_rpm: 20 + concurrency: 2 + + agent_email_tool: + timeout_ms: 30000 + max_chars_in: 10000 + max_bytes_out: 262144 + rate_limit_rpm: 10 + concurrency: 1 + + browser_tool: + timeout_ms: 60000 + max_chars_in: 8000 + max_bytes_out: 262144 + rate_limit_rpm: 8 + concurrency: 1 + + safe_code_executor_tool: + timeout_ms: 10000 + max_chars_in: 12000 + max_bytes_out: 131072 + rate_limit_rpm: 12 + concurrency: 2 + + secure_vault_tool: + timeout_ms: 15000 + max_chars_in: 4000 + max_bytes_out: 65536 + rate_limit_rpm: 20 + concurrency: 2 diff --git a/config/tools_rollout.yml b/config/tools_rollout.yml new file mode 100644 index 00000000..8d8f5e0a --- /dev/null +++ b/config/tools_rollout.yml @@ -0,0 +1,118 @@ +# Tool Rollout Configuration +# Defines default tool groups and role → tools mapping +# Used by agent_tools_config.py for automatic merge policy +# +# Syntax: +# - @group_name → expands to all tools in that group +# - tool_name → literal tool name + +# ─── Tool Groups ──────────────────────────────────────────────────────────── + +default_tools_read: + - repo_tool + - kb_tool + - oncall_tool + - observability_tool + - memory_search + - graph_query + - web_search + - web_extract + - remember_fact + +cto_tools: + - pr_reviewer_tool + - contract_tool + - config_linter_tool + - threatmodel_tool + - job_orchestrator_tool + - dependency_scanner_tool + - drift_analyzer_tool + - cost_analyzer_tool + - data_governance_tool + - calendar_tool + - agent_email_tool + - browser_tool + - safe_code_executor_tool + - secure_vault_tool + 
+content_tools: + - image_generate + - tts_speak + - presentation_create + - presentation_status + - presentation_download + - file_tool + - crawl4ai_scrape + +media_tools: + - comfy_generate_image + - comfy_generate_video + +# ─── Role Map ──────────────────────────────────────────────────────────────── +# Maps role → list of tool groups/tools +# Agents inherit tools from their role automatically. +# agent_specific tools are additive on top of role tools. + +role_map: + agent_default: + # All agents get read + content tools by default + tools: + - "@default_tools_read" + - "@content_tools" + + agent_cto: + # CTO-role agents (sofiia, yaromir) get everything + tools: + - "@default_tools_read" + - "@cto_tools" + - "@content_tools" + - "@media_tools" + + agent_oncall: + # Oncall agents: read + job orchestration + tools: + - "@default_tools_read" + - job_orchestrator_tool + + agent_media: + # Media/content agents: read + all media + tools: + - "@default_tools_read" + - "@content_tools" + - "@media_tools" + + agent_monitor: + # Monitor agents (per-node): read-only observability + health + KB + tools: + - observability_tool + - oncall_tool + - kb_tool + + agent_interface: + # Interface agents (AISTALK): minimal read + incident list + tools: + - kb_tool + - oncall_tool + +# ─── Agent → Role Assignment ───────────────────────────────────────────────── +agent_roles: + sofiia: agent_cto + admin: agent_cto + yaromir: agent_cto + helion: agent_oncall + alateya: agent_media + nutra: agent_media + agromatrix: agent_media + greenfood: agent_media + druid: agent_media + daarwizz: agent_default + clan: agent_default + eonarch: agent_media + senpai: agent_default + soul: agent_media + daarion: agent_media + oneok: agent_default + # Infrastructure / monitoring agents + monitor: agent_monitor + aistalk: agent_interface + # Fallback: unknown agents get agent_default diff --git a/docker-compose.node2-sofiia.yml b/docker-compose.node2-sofiia.yml index 2f2ac7ab..21166da5 100644 --- 
a/docker-compose.node2-sofiia.yml +++ b/docker-compose.node2-sofiia.yml @@ -37,8 +37,10 @@ services: - BACKLOG_BACKEND=auto - INCIDENT_BACKEND=auto - AUDIT_BACKEND=auto + - CALENDAR_SERVICE_URL=http://calendar-service:8001 volumes: - ./services/router/router-config.node2.yml:/app/router-config.yml:ro + - ./tools:/app/tools:ro - ./logs:/app/logs extra_hosts: - "host.docker.internal:host-gateway" @@ -47,6 +49,7 @@ services: depends_on: - dagi-nats - node-capabilities + - calendar-service networks: - dagi-network - dagi-memory-network @@ -77,6 +80,62 @@ services: - dagi-memory-network restart: unless-stopped + aurora-service: + build: + context: ./services/aurora-service + dockerfile: Dockerfile + container_name: aurora-service-node2 + ports: + - "127.0.0.1:9401:9401" + environment: + - AURORA_DATA_DIR=/data/aurora + - AURORA_PUBLIC_BASE_URL=http://127.0.0.1:9401 + - AURORA_CORS_ORIGINS=* + - AURORA_MODELS_DIR=/data/aurora/models + - AURORA_FORCE_CPU=false + - AURORA_PREFER_MPS=true + - AURORA_ENABLE_VIDEOTOOLBOX=true + volumes: + - sofiia-data:/data + networks: + - dagi-network + restart: unless-stopped + + radicale: + image: tomsquest/docker-radicale:latest + container_name: daarion-radicale-node2 + ports: + - "127.0.0.1:5232:5232" + environment: + - RADICALE_HOST=0.0.0.0 + - RADICALE_PORT=5232 + - RADICALE_LOG_LEVEL=INFO + volumes: + - radicale-data:/data + - radicale-config:/config + networks: + - dagi-network + restart: unless-stopped + + calendar-service: + build: + context: ./services/calendar-service + dockerfile: Dockerfile + container_name: calendar-service-node2 + ports: + - "127.0.0.1:8001:8001" + environment: + - DATABASE_URL=sqlite:////data/calendar.db + - RADICALE_URL=http://radicale:5232 + - PYTHONUNBUFFERED=1 + depends_on: + - radicale + volumes: + - sofiia-data:/data + networks: + - dagi-network + restart: unless-stopped + dagi-nats: image: nats:2.10-alpine container_name: dagi-nats-node2 @@ -157,11 +216,17 @@ services: - 
NODE_DEFAULT_VISION=llava:13b - NODE_WORKER_MAX_CONCURRENCY=2 - NCS_REPORT_URL=http://node-capabilities:8099 - # Capability providers (none = not available on this node) - - STT_PROVIDER=none - - TTS_PROVIDER=none + # Capability providers: delegate STT/TTS to Memory Service (Phase 1) + # Set to mlx_whisper/mlx_kokoro for Phase 2 MLX upgrade + - STT_PROVIDER=memory_service + - TTS_PROVIDER=memory_service + - MEMORY_SERVICE_URL=http://memory-service:8000 - OCR_PROVIDER=vision_prompted - IMAGE_PROVIDER=none + # Voice HA: dedicated concurrency limits for voice.*.request subjects + - VOICE_MAX_CONCURRENT_TTS=4 + - VOICE_MAX_CONCURRENT_LLM=2 + - VOICE_MAX_CONCURRENT_STT=2 depends_on: - dagi-nats networks: @@ -187,15 +252,30 @@ services: - OLLAMA_URL=${OLLAMA_URL:-http://host.docker.internal:11434} - NOTION_API_KEY=${NOTION_API_KEY:-} - OPENCODE_URL=${OPENCODE_URL:-} + - AURORA_SERVICE_URL=http://aurora-service:9401 # P1 SECURITY: SSH_PASSWORD removed — use key file instead # NODES_NODA1_SSH_PASSWORD is NO LONGER passed; sofiia-console reads from key file - NODES_NODA1_SSH_PRIVATE_KEY=/run/secrets/noda1_ssh_key - SUPERVISOR_API_KEY=${SUPERVISOR_API_KEY} - SOFIIA_CONSOLE_API_KEY=${SOFIIA_CONSOLE_API_KEY} - CORS_ORIGINS=${CORS_ORIGINS:-} + # Voice HA feature flag (default: false = legacy direct path to memory-service) + - VOICE_HA_ENABLED=${VOICE_HA_ENABLED:-false} + - VOICE_HA_ROUTER_URL=${VOICE_HA_ROUTER_URL:-http://router:8000} + # Projects / Documents / Sessions / Dialog Map (SQLite) + - SOFIIA_DATA_DIR=/app/data + - UPLOAD_MAX_IMAGE_MB=${UPLOAD_MAX_IMAGE_MB:-10} + - UPLOAD_MAX_VIDEO_MB=${UPLOAD_MAX_VIDEO_MB:-200} + - UPLOAD_MAX_DOC_MB=${UPLOAD_MAX_DOC_MB:-50} + # Phase 2 feature flags (disabled by default) + - USE_FABRIC_OCR=${USE_FABRIC_OCR:-false} + - USE_EMBEDDINGS=${USE_EMBEDDINGS:-false} + # Supervisor proxy (LangGraph workflows) + - SUPERVISOR_URL=${SUPERVISOR_URL:-http://sofiia-supervisor:8080} volumes: - ./config:/app/config - 
./secrets/noda1_id_ed25519:/run/secrets/noda1_ssh_key:ro + - sofiia-data:/app/data depends_on: - router networks: @@ -210,3 +290,11 @@ networks: dagi-memory-network: external: true name: dagi-memory-network-node2 + +volumes: + sofiia-data: + driver: local + radicale-data: + driver: local + radicale-config: + driver: local diff --git a/services/calendar-service/Dockerfile b/services/calendar-service/Dockerfile new file mode 100644 index 00000000..f437693e --- /dev/null +++ b/services/calendar-service/Dockerfile @@ -0,0 +1,12 @@ +FROM python:3.11-slim + +WORKDIR /app + +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt + +COPY . . + +EXPOSE 8001 + +CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8001"] diff --git a/services/calendar-service/storage.py b/services/calendar-service/storage.py new file mode 100644 index 00000000..80630ff6 --- /dev/null +++ b/services/calendar-service/storage.py @@ -0,0 +1,198 @@ +""" +Calendar Storage - Database models and operations +""" +from __future__ import annotations + +import os +import logging +from datetime import datetime +from typing import Optional, List +from dataclasses import dataclass + +logger = logging.getLogger(__name__) + + +# Simple SQLite-based storage (can be replaced with Postgres) +class CalendarStorage: + """In-memory/SQLite storage for calendar accounts and reminders""" + + def __init__(self, db_session=None): + self.db = db_session + self._accounts = {} + self._reminders = {} + self._idempotency_keys = {} + self._next_id = 1 + + def create_account( + self, + workspace_id: str, + user_id: str, + provider: str, + username: str, + password: str, + principal_url: str = None, + default_calendar_id: str = None + ): + """Create calendar account""" + # Check if exists + for acc in self._accounts.values(): + if (acc.workspace_id == workspace_id and + acc.user_id == user_id and + acc.provider == provider): + # Update + acc.username = username + acc.password = password + 
acc.principal_url = principal_url + acc.default_calendar_id = default_calendar_id + acc.updated_at = datetime.utcnow() + return acc + + # Create new + account = CalendarAccount( + id=f"acc_{self._next_id}", + workspace_id=workspace_id, + user_id=user_id, + provider=provider, + username=username, + password=password, # In production, encrypt this! + principal_url=principal_url, + default_calendar_id=default_calendar_id + ) + + self._accounts[account.id] = account + self._next_id += 1 + + logger.info(f"Created calendar account: {account.id}") + + return account + + def get_account(self, account_id: str) -> Optional[CalendarAccount]: + """Get account by ID""" + return self._accounts.get(account_id) + + def list_accounts( + self, + workspace_id: str, + user_id: str + ) -> List[CalendarAccount]: + """List accounts for user""" + return [ + acc for acc in self._accounts.values() + if acc.workspace_id == workspace_id and acc.user_id == user_id + ] + + def count_accounts(self) -> int: + """Count total accounts""" + return len(self._accounts) + + def create_reminder( + self, + workspace_id: str, + user_id: str, + account_id: str, + event_uid: str, + remind_at: str, + channel: str = "inapp" + ) -> "CalendarReminder": + """Create reminder""" + reminder = CalendarReminder( + id=f"rem_{self._next_id}", + workspace_id=workspace_id, + user_id=user_id, + account_id=account_id, + event_uid=event_uid, + remind_at=datetime.fromisoformat(remind_at), + channel=channel, + status="pending" + ) + + self._reminders[reminder.id] = reminder + self._next_id += 1 + + logger.info(f"Created reminder: {reminder.id}") + + return reminder + + def get_pending_reminders(self) -> List["CalendarReminder"]: + """Get pending reminders""" + now = datetime.utcnow() + return [ + r for r in self._reminders.values() + if r.status == "pending" and r.remind_at <= now + ] + + def update_reminder_status( + self, + reminder_id: str, + status: str, + error: str = None + ): + """Update reminder status""" + if 
reminder_id in self._reminders: + self._reminders[reminder_id].status = status + self._reminders[reminder_id].attempts += 1 + if error: + self._reminders[reminder_id].last_error = error + + def count_pending_reminders(self) -> int: + """Count pending reminders""" + return len([r for r in self._reminders.values() if r.status == "pending"]) + + def store_idempotency_key( + self, + key: str, + workspace_id: str, + user_id: str, + event_uid: str + ): + """Store idempotency key""" + self._idempotency_keys[key] = { + "workspace_id": workspace_id, + "user_id": user_id, + "event_uid": event_uid + } + + def get_by_idempotency_key(self, key: str) -> Optional[dict]: + """Get event UID by idempotency key""" + return self._idempotency_keys.get(key) + + +@dataclass +class CalendarAccount: + """Calendar account model""" + id: str + workspace_id: str + user_id: str + provider: str + username: str + password: str + principal_url: str = None + default_calendar_id: str = None + created_at: datetime = None + updated_at: datetime = None + + def __post_init__(self): + if self.created_at is None: + self.created_at = datetime.utcnow() + if self.updated_at is None: + self.updated_at = datetime.utcnow() + + +@dataclass +class CalendarReminder: + """Calendar reminder model""" + id: str + workspace_id: str + user_id: str + account_id: str + event_uid: str + remind_at: datetime + channel: str + status: str = "pending" + attempts: int = 0 + last_error: str = None + created_at: datetime = None + + def __post_init__(self): + if self.created_at is None: + self.created_at = datetime.utcnow() diff --git a/services/router/Dockerfile b/services/router/Dockerfile index ca9a3528..c910b53d 100644 --- a/services/router/Dockerfile +++ b/services/router/Dockerfile @@ -10,6 +10,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ # Install dependencies COPY requirements.txt . 
RUN pip install --no-cache-dir -r requirements.txt +RUN python -m playwright install --with-deps chromium # Copy application COPY . . @@ -37,4 +38,3 @@ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"] - diff --git a/services/router/agent_tools_config.py b/services/router/agent_tools_config.py index f77d149a..66afafca 100644 --- a/services/router/agent_tools_config.py +++ b/services/router/agent_tools_config.py @@ -141,6 +141,11 @@ AGENT_SPECIALIZED_TOOLS = { 'cost_analyzer_tool', 'pieces_tool', 'notion_tool', + 'calendar_tool', + 'agent_email_tool', + 'browser_tool', + 'safe_code_executor_tool', + 'secure_vault_tool', ], # Admin - platform operations @@ -154,6 +159,11 @@ AGENT_SPECIALIZED_TOOLS = { 'cost_analyzer_tool', 'pieces_tool', 'notion_tool', + 'calendar_tool', + 'agent_email_tool', + 'browser_tool', + 'safe_code_executor_tool', + 'secure_vault_tool', ], # Daarion - Media Generation diff --git a/services/router/requirements.txt b/services/router/requirements.txt index ef1fa8c8..21fe57f5 100644 --- a/services/router/requirements.txt +++ b/services/router/requirements.txt @@ -11,6 +11,9 @@ pypdf>=5.1.0 python-pptx>=0.6.23 odfpy>=1.4.1 pyarrow>=18.0.0 +cryptography>=41.0.0 +aiofiles>=23.2.1 +playwright>=1.40.0 # Memory Retrieval v3.0 asyncpg>=0.29.0 diff --git a/services/router/tool_manager.py b/services/router/tool_manager.py index 8e39c374..84ae8d57 100644 --- a/services/router/tool_manager.py +++ b/services/router/tool_manager.py @@ -12,6 +12,7 @@ import logging import hashlib import base64 import csv +import re import tempfile import subprocess import httpx @@ -431,18 +432,18 @@ TOOL_DEFINITIONS = [ "type": "function", "function": { "name": "market_data", - "description": "📊 Отримати real-time ринкові дані: поточну ціну, котирування, обсяги, аналітичні фічі (VWAP, spread, volatility, trade signals). Доступні символи: BTCUSDT, ETHUSDT.", + "description": "📊 Real-time ринкові дані: ціна, стакан, обсяги, 24h статистика, аналітичні фічі. 
Підтримувані символи (23): BTC, ETH, BNB, SOL, XRP, ADA, DOGE, AVAX, DOT, LINK, POL(MATIC), SHIB, TRX, UNI, LTC, ATOM, NEAR, ICP, FIL, APT (Binance Spot), PAXGUSDT (Gold-backed), XAUUSDT (Gold via Kraken), XAGUSDT (Silver via Kraken).", "parameters": { "type": "object", "properties": { "symbol": { "type": "string", - "description": "Торговий символ (наприклад BTCUSDT, ETHUSDT)" + "description": "Символ пари: BTCUSDT, ETHUSDT, SOLUSDT, XAUUSDT, XAGUSDT, PAXGUSDT тощо. Підтримується 23 символи." }, "query_type": { "type": "string", - "enum": ["price", "features", "all"], - "description": "Тип запиту: price (ціна + котирування), features (аналітичні фічі), all (все разом)", + "enum": ["price", "features", "all", "multi"], + "description": "price=ціна+стакан, features=VWAP/volatility/signals, all=повний, multi=всі 23 символи одразу", "default": "all" } }, @@ -450,6 +451,49 @@ TOOL_DEFINITIONS = [ } } }, + # PRIORITY 7b: Binance Bot Monitor (SenpAI) + { + "type": "function", + "function": { + "name": "binance_bots_top", + "description": "🤖 Показати топ торгових ботів Binance Marketplace (Spot/Futures Grid): ROI, PNL, риск-рейтинг. 
Також показує результати web-пошуку по Binance ботах.", + "parameters": { + "type": "object", + "properties": { + "grid_type": { + "type": "string", + "enum": ["SPOT", "FUTURES"], + "description": "Тип гриду: SPOT або FUTURES", + "default": "SPOT" + }, + "limit": { + "type": "integer", + "description": "Кількість ботів (max 20)", + "default": 10 + } + }, + "required": [] + } + } + }, + { + "type": "function", + "function": { + "name": "binance_account_bots", + "description": "💼 Показати стан власного Binance суб-акаунту: баланси, активні grid/algo ордери, типи дозволів (canTrade, permissions).", + "parameters": { + "type": "object", + "properties": { + "force_refresh": { + "type": "boolean", + "description": "Примусово оновити дані з Binance API (ігноруючи кеш)", + "default": False + } + }, + "required": [] + } + } + }, # PRIORITY 8: 1OK Window Master tools { "type": "function", @@ -1464,6 +1508,155 @@ TOOL_DEFINITIONS = [ "required": ["action"] } } + }, + { + "type": "function", + "function": { + "name": "pieces_tool", + "description": "🧩 Pieces OS integration: status/ping for local workstream processors.", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["status", "workstream_status", "ping"], + "description": "Pieces action (defaults to status)" + }, + "base_url": { + "type": "string", + "description": "Optional override for Pieces OS base URL" + } + } + } + } + }, + { + "type": "function", + "function": { + "name": "calendar_tool", + "description": "📅 Calendar management via calendar-service (CalDAV/Radicale).", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["connect", "list_calendars", "list_events", "get_event", "create_event", "update_event", "delete_event", "set_reminder"], + "description": "Calendar action" + }, + "workspace_id": {"type": "string", "description": "Workspace id"}, + "user_id": {"type": "string", "description": "User id"}, + 
"account_id": {"type": "string", "description": "Connected calendar account id"}, + "calendar_id": {"type": "string", "description": "Optional calendar id"}, + "params": {"type": "object", "description": "Action-specific payload"} + }, + "required": ["action"] + } + } + }, + { + "type": "function", + "function": { + "name": "agent_email_tool", + "description": "✉️ Agent email automation: inbox lifecycle, send/receive, email analysis.", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["create_inbox", "list_inboxes", "delete_inbox", "send", "receive", "analyze_email"], + "description": "Email action" + }, + "to": {"type": "array", "items": {"type": "string"}}, + "subject": {"type": "string"}, + "body": {"type": "string"}, + "html": {"type": "string"}, + "attachments": {"type": "array", "items": {"type": "string"}}, + "cc": {"type": "array", "items": {"type": "string"}}, + "bcc": {"type": "array", "items": {"type": "string"}}, + "inbox_id": {"type": "string"}, + "unread_only": {"type": "boolean"}, + "limit": {"type": "integer"}, + "query": {"type": "string"}, + "email": {"type": "object"} + }, + "required": ["action"] + } + } + }, + { + "type": "function", + "function": { + "name": "browser_tool", + "description": "🌐 Browser automation with managed per-agent sessions.", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["start_session", "restore_session", "close_session", "goto", "act", "extract", "observe", "screenshot", "fill_form", "wait_for", "get_current_url", "get_page_text"], + "description": "Browser action" + }, + "url": {"type": "string"}, + "instruction": {"type": "string"}, + "schema": {"type": "object"}, + "fields": {"type": "array", "items": {"type": "object"}}, + "selector_or_text": {"type": "string"}, + "timeout": {"type": "integer"}, + "headless": {"type": "boolean"}, + "proxy": {"type": "string"}, + "stealth": {"type": "boolean"}, + 
"restore_existing": {"type": "boolean"} + }, + "required": ["action"] + } + } + }, + { + "type": "function", + "function": { + "name": "safe_code_executor_tool", + "description": "🧪 Sandboxed code execution with security validation and limits.", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["validate", "execute", "execute_async", "get_job_result", "get_stats", "kill_process"], + "description": "Executor action" + }, + "language": {"type": "string", "enum": ["python", "javascript", "js"]}, + "code": {"type": "string"}, + "stdin": {"type": "string"}, + "limits": {"type": "object"}, + "job_id": {"type": "string"}, + "execution_id": {"type": "string"} + }, + "required": ["action"] + } + } + }, + { + "type": "function", + "function": { + "name": "secure_vault_tool", + "description": "🔐 Encrypted credential storage for agents.", + "parameters": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["store", "get", "delete", "list", "check_expiring", "vacuum"], + "description": "Vault action" + }, + "service": {"type": "string"}, + "credential_name": {"type": "string"}, + "value": {}, + "ttl_seconds": {"type": "integer"}, + "days": {"type": "integer"} + }, + "required": ["action"] + } + } } ] @@ -1488,11 +1681,17 @@ class ToolManager: self.http_client = httpx.AsyncClient(timeout=60.0) self.swapper_url = os.getenv("SWAPPER_URL", "http://swapper-service:8890") self.comfy_agent_url = os.getenv("COMFY_AGENT_URL", "http://212.8.58.133:8880") + self.image_gen_service_url = os.getenv("IMAGE_GEN_SERVICE_URL", "http://image-gen-service:7860") + self.calendar_service_url = os.getenv("CALENDAR_SERVICE_URL", "http://calendar-service:8001").rstrip("/") self.oneok_crm_url = os.getenv("ONEOK_CRM_BASE_URL", "http://oneok-crm-adapter:8088").rstrip("/") self.oneok_calc_url = os.getenv("ONEOK_CALC_BASE_URL", "http://oneok-calc-adapter:8089").rstrip("/") self.oneok_docs_url = os.getenv("ONEOK_DOCS_BASE_URL", 
"http://oneok-docs-adapter:8090").rstrip("/") self.oneok_schedule_url = os.getenv("ONEOK_SCHEDULE_BASE_URL", "http://oneok-schedule-adapter:8091").rstrip("/") self.oneok_adapter_api_key = os.getenv("ONEOK_ADAPTER_API_KEY", "").strip() + self._agent_email_clients: Dict[str, Any] = {} + self._browser_clients: Dict[str, Any] = {} + self._safe_code_executor: Any = None + self._secure_vault: Any = None self.tools_config = self._load_tools_config() def _load_tools_config(self) -> Dict[str, Dict]: @@ -1605,6 +1804,10 @@ class ToolManager: # Priority 7: Market Data (SenpAI) elif tool_name == "market_data": result = await self._market_data(arguments) + elif tool_name == "binance_bots_top": + result = await self._binance_bots_top(arguments) + elif tool_name == "binance_account_bots": + result = await self._binance_account_bots(arguments) # Priority 8: 1OK tools elif tool_name == "crm_search_client": result = await self._crm_search_client(arguments) @@ -1690,6 +1893,31 @@ class ToolManager: # Priority 18: Pieces OS integration elif tool_name == "pieces_tool": result = await self._pieces_tool(arguments) + # Priority 19: Calendar integration + elif tool_name == "calendar_tool": + enriched = dict(arguments or {}) + enriched.setdefault("agent_id", agent_id or "sofiia") + enriched.setdefault("workspace_id", workspace_id or "default") + if user_id: + enriched.setdefault("user_id", user_id) + result = await self._calendar_tool(enriched) + # Priority 20: Local automations toolkit + elif tool_name == "agent_email_tool": + enriched = dict(arguments or {}) + enriched.setdefault("agent_id", agent_id or "sofiia") + result = await self._agent_email_tool(enriched) + elif tool_name == "browser_tool": + enriched = dict(arguments or {}) + enriched.setdefault("agent_id", agent_id or "sofiia") + result = await self._browser_tool(enriched) + elif tool_name == "safe_code_executor_tool": + enriched = dict(arguments or {}) + enriched.setdefault("agent_id", agent_id or "sofiia") + result = await 
self._safe_code_executor_tool(enriched) + elif tool_name == "secure_vault_tool": + enriched = dict(arguments or {}) + enriched.setdefault("agent_id", agent_id or "sofiia") + result = await self._secure_vault_tool(enriched) else: result = ToolResult(success=False, result=None, error=f"Unknown tool: {tool_name}") except Exception as e: @@ -2125,6 +2353,433 @@ class ToolManager: error=f"Pieces OS unavailable: {str(e)}", ) + async def _calendar_tool(self, args: Dict[str, Any]) -> ToolResult: + payload = args or {} + action = str(payload.get("action") or "").strip() + if not action: + return ToolResult(success=False, result=None, error="Missing action") + + req = { + "action": action, + "workspace_id": str(payload.get("workspace_id") or "default"), + "user_id": str(payload.get("user_id") or payload.get("agent_id") or "sofiia"), + "account_id": payload.get("account_id"), + "calendar_id": payload.get("calendar_id"), + "params": payload.get("params") if isinstance(payload.get("params"), dict) else {}, + } + headers = { + "x-agent-id": str(payload.get("agent_id") or "sofiia"), + "x-workspace-id": req["workspace_id"], + "content-type": "application/json", + } + url = f"{self.calendar_service_url}/v1/tools/calendar" + try: + resp = await self.http_client.post(url, json=req, headers=headers, timeout=20.0) + if resp.status_code != 200: + return ToolResult(success=False, result=None, error=f"calendar-service HTTP {resp.status_code}: {resp.text[:300]}") + data = resp.json() + if isinstance(data, dict) and data.get("status") in {"failed", "error"}: + return ToolResult(success=False, result=None, error=str(data.get("detail") or data.get("error") or "Calendar error")) + return ToolResult(success=True, result=data) + except Exception as e: + return ToolResult(success=False, result=None, error=f"Calendar service unavailable: {e}") + + def _get_agent_email_client(self, agent_id: str): + if agent_id in self._agent_email_clients: + return self._agent_email_clients[agent_id] + try: + from 
tools.agent_email.agent_email import AgentEmailTool # type: ignore + except Exception as e: + raise RuntimeError(f"AgentEmailTool import failed: {e}") + client = AgentEmailTool(agent_id=agent_id) + self._agent_email_clients[agent_id] = client + return client + + def _get_browser_client(self, agent_id: str): + if agent_id in self._browser_clients: + return self._browser_clients[agent_id] + try: + from tools.browser_tool.browser_tool import BrowserTool # type: ignore + except Exception as e: + raise RuntimeError(f"BrowserTool import failed: {e}") + client = BrowserTool(agent_id=agent_id, headless=True) + self._browser_clients[agent_id] = client + return client + + def _get_safe_executor(self): + if self._safe_code_executor is not None: + return self._safe_code_executor + try: + from tools.safe_code_executor.safe_code_executor import SafeCodeExecutor # type: ignore + except Exception as e: + raise RuntimeError(f"SafeCodeExecutor import failed: {e}") + self._safe_code_executor = SafeCodeExecutor() + return self._safe_code_executor + + def _get_secure_vault(self): + if self._secure_vault is not None: + return self._secure_vault + try: + from tools.secure_vault.secure_vault import SecureVault # type: ignore + except Exception as e: + raise RuntimeError(f"SecureVault import failed: {e}") + self._secure_vault = SecureVault() + return self._secure_vault + + async def _agent_email_tool(self, args: Dict[str, Any]) -> ToolResult: + payload = args or {} + action = str(payload.get("action") or "").strip().lower() + agent_id = str(payload.get("agent_id") or "sofiia") + if not action: + return ToolResult(success=False, result=None, error="Missing action") + try: + tool = self._get_agent_email_client(agent_id) + if action == "create_inbox": + return ToolResult(success=True, result=tool.create_inbox( + username=payload.get("username"), + domain=payload.get("domain"), + display_name=payload.get("display_name"), + )) + if action == "list_inboxes": + return ToolResult(success=True, 
result={"inboxes": tool.list_inboxes()}) + if action == "delete_inbox": + return ToolResult(success=True, result=tool.delete_inbox(inbox_id=payload.get("inbox_id"))) + if action == "send": + to = payload.get("to") if isinstance(payload.get("to"), list) else [] + return ToolResult(success=True, result=tool.send( + to=to, + subject=str(payload.get("subject") or ""), + body=str(payload.get("body") or ""), + html=payload.get("html"), + attachments=payload.get("attachments") if isinstance(payload.get("attachments"), list) else None, + cc=payload.get("cc") if isinstance(payload.get("cc"), list) else None, + bcc=payload.get("bcc") if isinstance(payload.get("bcc"), list) else None, + inbox_id=payload.get("inbox_id"), + )) + if action == "receive": + return ToolResult(success=True, result={ + "emails": tool.receive( + unread_only=bool(payload.get("unread_only", True)), + limit=int(payload.get("limit", 20)), + query=payload.get("query"), + inbox_id=payload.get("inbox_id"), + ) + }) + if action == "analyze_email": + email_obj = payload.get("email") if isinstance(payload.get("email"), dict) else {} + return ToolResult(success=True, result=tool.analyze_and_extract(email_obj)) + return ToolResult(success=False, result=None, error=f"Unsupported action: {action}") + except Exception as e: + return ToolResult(success=False, result=None, error=f"Agent email tool error: {e}") + + async def _browser_tool(self, args: Dict[str, Any]) -> ToolResult: + payload = args or {} + action = str(payload.get("action") or "").strip().lower() + agent_id = str(payload.get("agent_id") or "sofiia") + if not action: + return ToolResult(success=False, result=None, error="Missing action") + try: + tool = self._get_browser_client(agent_id) + if action == "start_session": + result = await asyncio.to_thread( + tool.start_session, + headless=payload.get("headless"), + proxy=payload.get("proxy"), + stealth=payload.get("stealth"), + restore_existing=bool(payload.get("restore_existing", True)), + ) + return 
ToolResult(success=True, result=result) + if action == "restore_session": + result = await asyncio.to_thread(tool.restore_session, agent_id) + return ToolResult(success=True, result=result) + if action == "close_session": + result = await asyncio.to_thread(tool.close_session) + return ToolResult(success=True, result=result) + if action == "goto": + result = await asyncio.to_thread(tool.goto, str(payload.get("url") or "")) + return ToolResult(success=True, result=result) + if action == "act": + result = await asyncio.to_thread(tool.act, str(payload.get("instruction") or "")) + return ToolResult(success=True, result=result) + if action == "extract": + result = await asyncio.to_thread( + tool.extract, + instruction=str(payload.get("instruction") or ""), + schema=payload.get("schema") if isinstance(payload.get("schema"), dict) else None, + ) + return ToolResult(success=True, result=result) + if action == "observe": + actions = await asyncio.to_thread(tool.observe, instruction=payload.get("instruction")) + return ToolResult(success=True, result={"actions": actions}) + if action == "screenshot": + shot = await asyncio.to_thread(tool.screenshot) + if isinstance(shot, bytes): + return ToolResult(success=True, result={"status": "ok"}, file_base64=self._b64_from_bytes(shot), file_name="screenshot.png", file_mime="image/png") + return ToolResult(success=True, result={"path": shot}) + if action == "fill_form": + fields = payload.get("fields") if isinstance(payload.get("fields"), list) else [] + result = await asyncio.to_thread(tool.fill_form, fields) + return ToolResult(success=True, result=result) + if action == "wait_for": + found = await asyncio.to_thread( + tool.wait_for, + str(payload.get("selector_or_text") or ""), + timeout=int(payload.get("timeout", 10)), + ) + return ToolResult(success=True, result={"found": found}) + if action == "get_current_url": + url = await asyncio.to_thread(tool.get_current_url) + return ToolResult(success=True, result={"url": url}) + if action 
== "get_page_text": + text = await asyncio.to_thread(tool.get_page_text) + return ToolResult(success=True, result={"text": text}) + return ToolResult(success=False, result=None, error=f"Unsupported action: {action}") + except Exception as e: + return ToolResult(success=False, result=None, error=f"Browser tool error: {e}") + + async def _safe_code_executor_tool(self, args: Dict[str, Any]) -> ToolResult: + payload = args or {} + action = str(payload.get("action") or "").strip().lower() + if not action: + return ToolResult(success=False, result=None, error="Missing action") + try: + executor = self._get_safe_executor() + if action == "validate": + language = str(payload.get("language") or "") + code = str(payload.get("code") or "") + err = executor.validate(language, code) + return ToolResult(success=err is None, result={"valid": err is None, "error": err}) + if action == "execute": + return ToolResult(success=True, result=executor.execute( + language=str(payload.get("language") or ""), + code=str(payload.get("code") or ""), + stdin=payload.get("stdin"), + limits=payload.get("limits") if isinstance(payload.get("limits"), dict) else None, + context={"agent_id": payload.get("agent_id")}, + )) + if action == "execute_async": + job_id = executor.execute_async( + language=str(payload.get("language") or ""), + code=str(payload.get("code") or ""), + stdin=payload.get("stdin"), + limits=payload.get("limits") if isinstance(payload.get("limits"), dict) else None, + context={"agent_id": payload.get("agent_id")}, + ) + return ToolResult(success=True, result={"job_id": job_id}) + if action == "get_job_result": + return ToolResult(success=True, result={"job": executor.get_job_result(str(payload.get("job_id") or ""))}) + if action == "get_stats": + return ToolResult(success=True, result=executor.get_stats()) + if action == "kill_process": + ok = executor.kill_process(str(payload.get("execution_id") or "")) + return ToolResult(success=ok, result={"killed": ok}, error=None if ok else 
"execution_id not found") + return ToolResult(success=False, result=None, error=f"Unsupported action: {action}") + except Exception as e: + return ToolResult(success=False, result=None, error=f"Safe code executor error: {e}") + + async def _secure_vault_tool(self, args: Dict[str, Any]) -> ToolResult: + payload = args or {} + action = str(payload.get("action") or "").strip().lower() + agent_id = str(payload.get("agent_id") or "sofiia") + if not action: + return ToolResult(success=False, result=None, error="Missing action") + try: + vault = self._get_secure_vault() + if action == "store": + return ToolResult(success=True, result=vault.store( + agent_id=agent_id, + service=str(payload.get("service") or ""), + credential_name=str(payload.get("credential_name") or ""), + value=payload.get("value"), + ttl_seconds=int(payload["ttl_seconds"]) if payload.get("ttl_seconds") is not None else None, + )) + if action == "get": + value = vault.get( + agent_id=agent_id, + service=str(payload.get("service") or ""), + credential_name=str(payload.get("credential_name") or ""), + ) + return ToolResult(success=True, result={"value": value, "found": value is not None}) + if action == "delete": + return ToolResult(success=True, result=vault.delete( + agent_id=agent_id, + service=str(payload.get("service") or ""), + credential_name=str(payload.get("credential_name") or ""), + )) + if action == "list": + listed = vault.list(agent_id=agent_id, service=payload.get("service")) + key = "credentials" if payload.get("service") else "services" + return ToolResult(success=True, result={key: listed}) + if action == "check_expiring": + return ToolResult(success=True, result={"items": vault.check_expiring(agent_id, days=int(payload.get("days", 7)))}) + if action == "vacuum": + return ToolResult(success=True, result=vault.vacuum(agent_id)) + return ToolResult(success=False, result=None, error=f"Unsupported action: {action}") + except Exception as e: + return ToolResult(success=False, result=None, 
error=f"Secure vault tool error: {e}") + + async def _kb_tool(self, args: Dict[str, Any]) -> ToolResult: + """ + Knowledge Base Tool (read-only). + Actions: search, snippets, open, sources. + """ + payload = args or {} + action = str(payload.get("action") or "search").strip().lower() + params = payload.get("params") if isinstance(payload.get("params"), dict) else {} + + repo_root = Path(os.getenv("REPO_ROOT", "/app")).resolve() + default_paths = ["docs", "runbooks", "ops", "adr", "specs"] + excluded_dirs = {"node_modules", "vendor", "dist", "build", ".git", "__pycache__", ".pytest_cache", "venv", ".venv"} + allowed_ext = {".md", ".txt", ".yaml", ".yml", ".json"} + + def _safe_roots(paths: Any) -> List[Path]: + roots: List[Path] = [] + rels = paths if isinstance(paths, list) and paths else default_paths + for rel in rels: + if not isinstance(rel, str) or not rel.strip(): + continue + cand = (repo_root / rel.strip()).resolve() + if str(cand).startswith(str(repo_root)) and cand.exists() and cand.is_dir(): + roots.append(cand) + return roots + + def _is_excluded(path: Path) -> bool: + return any(part in excluded_dirs for part in path.parts) + + def _iter_candidates(paths: Any, file_glob: str) -> List[Path]: + roots = _safe_roots(paths) + out: List[Path] = [] + pattern = file_glob or "**/*.md" + for root in roots: + try: + for p in root.glob(pattern): + if not p.is_file(): + continue + if p.suffix.lower() not in allowed_ext: + continue + if _is_excluded(p): + continue + out.append(p) + except Exception: + continue + return out + + def _read_text(path: Path, max_bytes: int = 120000) -> str: + try: + return path.read_text(encoding="utf-8", errors="ignore")[:max_bytes] + except Exception: + return "" + + def _tokens(query: str) -> List[str]: + return [t for t in re.findall(r"\w+", (query or "").lower()) if len(t) > 1] + + def _rel(path: Path) -> str: + return str(path.resolve().relative_to(repo_root)) + + if action == "sources": + rows = [] + for root in 
_safe_roots(params.get("paths")): + count = 0 + for p in root.rglob("*"): + if p.is_file() and p.suffix.lower() in allowed_ext and not _is_excluded(p): + count += 1 + rows.append({"path": _rel(root), "file_count": count, "status": "indexed"}) + return ToolResult(success=True, result={"summary": f"Found {len(rows)} indexed sources", "sources": rows, "allowed_paths": default_paths}) + + if action == "open": + raw_path = str(params.get("path") or "").strip() + if not raw_path: + return ToolResult(success=False, result=None, error="path is required") + target = (repo_root / raw_path).resolve() + if not str(target).startswith(str(repo_root)): + return ToolResult(success=False, result=None, error="Path traversal blocked") + if not any(str(target).startswith(str(root)) for root in _safe_roots(default_paths)): + return ToolResult(success=False, result=None, error="Path not in allowed directories") + if not target.exists() or not target.is_file(): + return ToolResult(success=False, result=None, error="File not found") + + start_line = max(1, int(params.get("start_line", 1))) + end_line_raw = params.get("end_line") + end_line = int(end_line_raw) if end_line_raw is not None else None + max_bytes = int(params.get("max_bytes", 200000)) + content = _read_text(target, max_bytes=max_bytes) + lines = content.splitlines() + sliced = lines[start_line - 1:end_line] if end_line else lines[start_line - 1:] + return ToolResult( + success=True, + result={ + "path": _rel(target), + "start_line": start_line, + "end_line": end_line or (start_line + len(sliced) - 1), + "total_lines": len(lines), + "content": "\n".join(sliced), + "truncated": len(content) >= max_bytes, + }, + ) + + if action in {"search", "snippets"}: + query = str(params.get("query") or "").strip() + if not query: + return ToolResult(success=False, result=None, error="query is required") + + limit = max(1, min(int(params.get("limit", 20 if action == "search" else 8)), 100)) + context_lines = max(0, 
min(int(params.get("context_lines", 4)), 20)) + max_chars = max(100, min(int(params.get("max_chars_per_snippet", 800)), 5000)) + candidates = _iter_candidates(params.get("paths"), params.get("file_glob", "**/*.md")) + q_tokens = _tokens(query) + + ranked = [] + for p in candidates: + text = _read_text(p) + if not text: + continue + lower = text.lower() + score = float(sum(lower.count(tok) for tok in q_tokens)) + if score <= 0: + continue + ranked.append((score, p, text)) + ranked.sort(key=lambda x: x[0], reverse=True) + + if action == "search": + results = [] + for score, p, text in ranked[:limit]: + highlights: List[str] = [] + lower = text.lower() + for tok in q_tokens[:5]: + idx = lower.find(tok) + if idx >= 0: + s = max(0, idx - 30) + e = min(len(text), idx + len(tok) + 30) + highlights.append("..." + text[s:e].replace("\n", " ") + "...") + results.append({"path": _rel(p), "score": round(score, 2), "highlights": highlights[:5]}) + return ToolResult( + success=True, + result={"summary": f"Found {len(results)} results for '{query}'", "results": results, "query": query, "count": len(results)}, + ) + + snippets = [] + for score, p, text in ranked: + lines = text.splitlines() + lower_lines = [ln.lower() for ln in lines] + for i, ln in enumerate(lower_lines): + if any(tok in ln for tok in q_tokens): + s = max(0, i - context_lines) + e = min(len(lines), i + context_lines + 1) + snippet = "\n".join(lines[s:e]) + if len(snippet) > max_chars: + snippet = snippet[:max_chars] + "..." 
+ snippets.append({"path": _rel(p), "score": round(score, 2), "lines": f"L{s+1}-L{e}", "text": snippet}) + if len(snippets) >= limit: + break + if len(snippets) >= limit: + break + return ToolResult( + success=True, + result={"summary": f"Found {len(snippets)} snippets for '{query}'", "results": snippets, "query": query, "count": len(snippets)}, + ) + + return ToolResult(success=False, result=None, error=f"Unknown action: {action}") + def _file_csv_create(self, args: Dict[str, Any]) -> ToolResult: file_name = self._sanitize_file_name(args.get("file_name"), "export.csv", force_ext=".csv") headers = args.get("headers") or [] @@ -3807,15 +4462,116 @@ class ToolManager: logger.debug(f"Could not unload FLUX: {e}") async def _image_generate(self, args: Dict) -> ToolResult: - """Backward-compatible image generation entrypoint routed to Comfy (NODE3).""" + """Backward-compatible image generation with fallback chain.""" if not args.get("prompt"): return ToolResult(success=False, result=None, error="prompt is required") - comfy_args = dict(args) - comfy_args.setdefault("negative_prompt", "blurry, low quality, watermark") - comfy_args.setdefault("steps", 28) - comfy_args.setdefault("timeout_s", 180) - return await self._comfy_generate_image(comfy_args) + prepared = dict(args) + prepared.setdefault("negative_prompt", "blurry, low quality, watermark") + prepared.setdefault("steps", 28) + prepared.setdefault("timeout_s", 180) + + providers = [ + ("comfy", self._comfy_generate_image), + ("swapper", self._swapper_generate_image), + ("image_gen_service", self._image_gen_service_generate), + ] + errors: List[str] = [] + for name, handler in providers: + try: + result = await handler(dict(prepared)) + except Exception as e: + msg = f"{name}: {str(e)[:200]}" + logger.warning("image_generate provider exception: %s", msg) + errors.append(msg) + continue + if result.success: + return result + errors.append(f"{name}: {result.error or 'unknown error'}") + + return ToolResult( + 
success=False, + result=None, + error="All image providers failed: " + " | ".join(errors), + ) + + async def _swapper_generate_image(self, args: Dict) -> ToolResult: + """Generate image via swapper-service (local fallback).""" + prompt = args.get("prompt") + if not prompt: + return ToolResult(success=False, result=None, error="prompt is required") + payload = { + "model": args.get("model", "flux-klein-4b"), + "prompt": prompt, + "negative_prompt": args.get("negative_prompt", ""), + "num_inference_steps": int(args.get("steps", args.get("num_inference_steps", 28))), + "guidance_scale": float(args.get("guidance_scale", 4.0)), + "width": int(args.get("width", 1024)), + "height": int(args.get("height", 1024)), + } + timeout_s = max(30, int(args.get("timeout_s", 180))) + try: + resp = await self.http_client.post( + f"{self.swapper_url}/image/generate", + json=payload, + timeout=timeout_s, + ) + except Exception as e: + return ToolResult(success=False, result=None, error=f"Swapper request failed: {str(e)[:200]}") + if resp.status_code >= 400: + detail = (resp.text or "").strip()[:220] + return ToolResult(success=False, result=None, error=f"Swapper error {resp.status_code}: {detail}") + + data = resp.json() + image_base64 = data.get("image_base64") + if image_base64: + return ToolResult( + success=True, + result="✅ Зображення згенеровано через swapper-service", + image_base64=image_base64, + ) + if data.get("success"): + return ToolResult(success=True, result=json.dumps(data, ensure_ascii=False)) + return ToolResult(success=False, result=None, error="Swapper returned no image payload") + + async def _image_gen_service_generate(self, args: Dict) -> ToolResult: + """Generate image via image-gen-service as final fallback.""" + prompt = args.get("prompt") + if not prompt: + return ToolResult(success=False, result=None, error="prompt is required") + payload = { + "prompt": prompt, + "negative_prompt": args.get("negative_prompt"), + "width": int(args.get("width", 1024)), + 
"height": int(args.get("height", 1024)), + "num_inference_steps": int(args.get("steps", args.get("num_inference_steps", 25))), + "guidance_scale": float(args.get("guidance_scale", 3.5)), + } + seed = args.get("seed") + if seed is not None: + payload["seed"] = int(seed) + timeout_s = max(30, int(args.get("timeout_s", 240))) + try: + resp = await self.http_client.post( + f"{self.image_gen_service_url}/generate", + json=payload, + timeout=timeout_s, + ) + except Exception as e: + return ToolResult(success=False, result=None, error=f"Image-gen request failed: {str(e)[:200]}") + if resp.status_code >= 400: + detail = (resp.text or "").strip()[:220] + return ToolResult(success=False, result=None, error=f"Image-gen error {resp.status_code}: {detail}") + + data = resp.json() + image_base64 = data.get("image_base64") + if image_base64: + return ToolResult( + success=True, + result="✅ Зображення згенеровано через image-gen-service", + image_base64=image_base64, + ) + return ToolResult(success=False, result=None, error="Image-gen service returned no image payload") async def _poll_comfy_job(self, job_id: str, timeout_s: int = 180) -> Dict[str, Any]: """Poll Comfy Agent job status until terminal state or timeout.""" @@ -4009,6 +4765,7 @@ class ToolManager: "user_id": runtime_user_id, "fact_key": fact_key, "fact_value": fact, + "agent_id": agent_id, "fact_value_json": { "text": fact, "category": category, @@ -4249,46 +5006,86 @@ class ToolManager: return ToolResult(success=False, result=None, error=str(e)) async def _market_data(self, args: Dict) -> ToolResult: - """Query real-time market data from Market Data Service and SenpAI MD Consumer.""" + """Query real-time market data. Supports 23 symbols incl. 
XAU/XAG via Kraken.""" symbol = str(args.get("symbol", "BTCUSDT")).upper() query_type = str(args.get("query_type", "all")).lower() md_url = os.getenv("MARKET_DATA_URL", "http://dagi-market-data-node1:8891") consumer_url = os.getenv("SENPAI_CONSUMER_URL", "http://dagi-senpai-md-consumer-node1:8892") + binance_monitor_url = os.getenv("BINANCE_MONITOR_URL", "http://dagi-binance-bot-monitor-node1:8893") + + # Symbols served via binance-bot-monitor CCXT (not available in market-data-service WS) + CCXT_SYMBOLS = { + 'XAUUSDT', 'XAGUSDT', # Gold/Silver via Kraken + 'BNBUSDT', 'SOLUSDT', 'XRPUSDT', 'ADAUSDT', 'DOGEUSDT', + 'AVAXUSDT', 'DOTUSDT', 'LINKUSDT', 'POLUSDT', 'SHIBUSDT', + 'TRXUSDT', 'UNIUSDT', 'LTCUSDT', 'ATOMUSDT', 'NEARUSDT', + 'ICPUSDT', 'FILUSDT', 'APTUSDT', 'PAXGUSDT', + } results: Dict[str, Any] = {} try: - async with httpx.AsyncClient(timeout=8.0) as client: - if query_type in ("price", "all"): + async with httpx.AsyncClient(timeout=10.0) as client: + + # multi mode: return all 23 symbols at once + if query_type == "multi": try: - resp = await client.get(f"{md_url}/latest", params={"symbol": symbol}) + resp = await client.get(f"{binance_monitor_url}/prices", timeout=12.0) + if resp.status_code == 200: + return ToolResult(success=True, result=resp.text) + except Exception as e: + return ToolResult(success=False, result=None, error=f"multi prices fetch error: {e}") + + # For XAU/XAG/PAXG and extended symbols — use binance-bot-monitor CCXT + if symbol in CCXT_SYMBOLS or query_type == "price": + try: + resp = await client.get(f"{binance_monitor_url}/price", params={"symbol": symbol}, timeout=10.0) if resp.status_code == 200: data = resp.json() - trade = data.get("latest_trade", {}) or {} - quote = data.get("latest_quote", {}) or {} - bid = quote.get("bid") - ask = quote.get("ask") - spread = None - if isinstance(bid, (int, float)) and isinstance(ask, (int, float)): - spread = round(ask - bid, 6) results["price"] = { "symbol": symbol, - "last_price": 
trade.get("price"), - "size": trade.get("size"), - "side": trade.get("side"), - "bid": bid, - "ask": ask, - "spread": spread, - "provider": trade.get("provider"), - "timestamp": trade.get("ts_recv"), + "last_price": data.get("price"), + "bid": data.get("bid"), + "ask": data.get("ask"), + "volume_24h": data.get("volume_24h"), + "price_change_pct_24h": data.get("price_change_pct_24h"), + "high_24h": data.get("high_24h"), + "low_24h": data.get("low_24h"), + "exchange": data.get("exchange", "binance"), + "note": data.get("note"), + "error": data.get("error"), } else: - results["price_error"] = f"market-data status={resp.status_code}" + results["price_error"] = f"binance-monitor status={resp.status_code}" except Exception as e: results["price_error"] = str(e) - if query_type in ("features", "all"): + # For BTC/ETH (WS primary) also get features from senpai-consumer + if symbol in ('BTCUSDT', 'ETHUSDT') and query_type in ("features", "all"): + # WS price from market-data-service + if query_type == "all" and "price" not in results: + try: + resp = await client.get(f"{md_url}/latest", params={"symbol": symbol}) + if resp.status_code == 200: + data = resp.json() + trade = data.get("latest_trade", {}) or {} + quote = data.get("latest_quote", {}) or {} + bid = quote.get("bid") + ask = quote.get("ask") + spread = None + if isinstance(bid, (int, float)) and isinstance(ask, (int, float)): + spread = round(ask - bid, 6) + results["price"] = { + "symbol": symbol, + "last_price": trade.get("price"), + "bid": bid, "ask": ask, "spread": spread, + "provider": trade.get("provider"), + "timestamp": trade.get("ts_recv"), + } + except Exception as e: + results["price_ws_error"] = str(e) + # Features from senpai-consumer try: resp = await client.get(f"{consumer_url}/features/latest", params={"symbol": symbol}) if resp.status_code == 200: @@ -4301,17 +5098,34 @@ class ToolManager: "vwap_10s": round(float(feats.get("trade_vwap_10s", 0) or 0), 2), "vwap_60s": 
round(float(feats.get("trade_vwap_60s", 0) or 0), 2), "trade_count_10s": int(feats.get("trade_count_10s", 0) or 0), - "trade_volume_10s": round(float(feats.get("trade_volume_10s", 0) or 0), 4), "return_10s_pct": round(float(feats.get("return_10s", 0) or 0) * 100, 4), "realized_vol_60s_pct": round(float(feats.get("realized_vol_60s", 0) or 0) * 100, 6), - "latency_p50_ms": round(float(feats.get("latency_ms_p50", 0) or 0), 1), - "latency_p95_ms": round(float(feats.get("latency_ms_p95", 0) or 0), 1), } - else: - results["features_error"] = f"senpai-consumer status={resp.status_code}" except Exception as e: results["features_error"] = str(e) + elif symbol not in CCXT_SYMBOLS and "price" not in results: + # Fallback for any symbol: try market-data-service WS + try: + resp = await client.get(f"{md_url}/latest", params={"symbol": symbol}) + if resp.status_code == 200: + data = resp.json() + trade = data.get("latest_trade", {}) or {} + quote = data.get("latest_quote", {}) or {} + bid = quote.get("bid") + ask = quote.get("ask") + results["price"] = { + "symbol": symbol, + "last_price": trade.get("price"), + "bid": bid, "ask": ask, + "provider": trade.get("provider"), + "timestamp": trade.get("ts_recv"), + } + else: + results["price_error"] = f"market-data status={resp.status_code}" + except Exception as e: + results["price_error"] = str(e) + if not results: return ToolResult(success=False, result=None, error=f"No market data for {symbol}") @@ -4320,6 +5134,39 @@ class ToolManager: logger.error(f"Market data tool error: {e}") return ToolResult(success=False, result=None, error=str(e)) + async def _binance_bots_top(self, args: Dict) -> ToolResult: + """Get top Binance Marketplace bots via binance-bot-monitor service.""" + grid_type = str(args.get("grid_type", "SPOT")).upper() + limit = int(args.get("limit", 10)) + binance_monitor_url = os.getenv("BINANCE_MONITOR_URL", "http://dagi-binance-bot-monitor-node1:8893") + try: + async with httpx.AsyncClient(timeout=10.0) as client: + 
resp = await client.get(f"{binance_monitor_url}/top-bots", params={"grid_type": grid_type, "limit": limit}) + if resp.status_code == 200: + data = resp.json() + return ToolResult(success=True, result=json.dumps(data, ensure_ascii=False)) + else: + return ToolResult(success=False, result=None, error=f"binance-monitor status={resp.status_code}") + except Exception as e: + logger.error(f"binance_bots_top failed: {e}") + return ToolResult(success=False, result=None, error=str(e)) + + async def _binance_account_bots(self, args: Dict) -> ToolResult: + """Get own Binance sub-account bots and balances via binance-bot-monitor service.""" + force_refresh = bool(args.get("force_refresh", False)) + binance_monitor_url = os.getenv("BINANCE_MONITOR_URL", "http://dagi-binance-bot-monitor-node1:8893") + try: + async with httpx.AsyncClient(timeout=12.0) as client: + resp = await client.get(f"{binance_monitor_url}/account-bots", params={"force_refresh": str(force_refresh).lower()}) + if resp.status_code == 200: + data = resp.json() + return ToolResult(success=True, result=json.dumps(data, ensure_ascii=False)) + else: + return ToolResult(success=False, result=None, error=f"binance-monitor status={resp.status_code}") + except Exception as e: + logger.error(f"binance_account_bots failed: {e}") + return ToolResult(success=False, result=None, error=str(e)) + async def _oneok_http_call(self, base_url: str, path: str, payload: Dict[str, Any], method: str = "POST") -> ToolResult: url = f"{base_url}{path}" try: @@ -8832,341 +9679,3 @@ def format_tool_calls_for_response(tool_results: List[Dict], fallback_mode: str except Exception as e: logger.error(f"Job orchestrator error: {e}") return ToolResult(success=False, result=None, error=f"Internal error: {str(e)}") - - async def _kb_tool(self, args: Dict) -> ToolResult: - """ - Knowledge Base Tool - - Provides read-only access to organizational knowledge: - - ADR, architecture docs, runbooks, standards - - Search with ranking - - Snippets with 
context - - File opening with line ranges - - Security: - - Only allowlisted directories - - Path traversal protection - - Secrets redaction - - Read-only - """ - import os - import re - import fnmatch - from pathlib import Path - - action = (args or {}).get("action", "search") - params = (args or {}).get("params", {}) - - REPO_ROOT = os.getenv("REPO_ROOT", "/Users/apple/github-projects/microdao-daarion") - - ALLOWED_KB_PATHS = [ - "docs", - "runbooks", - "ops", - "adr", - "specs", - ] - - EXCLUDED_DIRS = { - "node_modules", "vendor", "dist", "build", ".git", - "__pycache__", ".pytest_cache", "venv", ".venv" - } - - SECRET_PATTERN = re.compile( - r'(?i)(api[_-]?key|secret|token|password|jwt|bearer)\s*[:=]\s*["\']?([a-zA-Z0-9_\-]{8,})' - ) - - def is_allowed_path(path: str) -> bool: - """Check if path is within allowed directories""" - normalized = os.path.normpath(path) - for allowed in ALLOWED_KB_PATHS: - if normalized.startswith(os.path.join(REPO_ROOT, allowed)): - return True - return False - - def is_excluded(path: str) -> bool: - """Check if path is in excluded directories""" - parts = Path(path).parts - return any(excluded in parts for excluded in EXCLUDED_DIRS) - - def redact_secrets(text: str) -> str: - """Redact secrets from text""" - def replacer(match): - key = match.group(1) - value = match.group(2) if match.lastindex >= 2 else "" - return f"{key}=***{'*' * min(len(value), 8)}" - return SECRET_PATTERN.sub(replacer, text) - - def tokenize(query: str) -> set: - """Simple tokenization""" - words = re.findall(r'\b\w+\b', query.lower()) - return set(words) - - def calculate_score(content: str, query_tokens: set, file_path: str) -> float: - """Calculate relevance score""" - content_lower = content.lower() - lines = content.split('\n') - - score = 0.0 - - for token in query_tokens: - count = content_lower.count(token) - score += count - - if query_tokens: - header_bonus = 0 - for i, line in enumerate(lines[:10]): - if line.strip().startswith('#'): - if any(tok 
in line.lower() for tok in query_tokens): - header_bonus += 5 - score += header_bonus - - if 'adr' in file_path.lower() or 'adr/' in file_path.lower(): - score *= 1.2 - - filename_bonus = sum(2 for token in query_tokens if token in Path(file_path).name.lower()) - score += filename_bonus - - if len(content) > 50000: - score *= 0.8 - - return score - - def search_files(query: str, paths: list, file_glob: str, limit: int) -> list: - """Search files for query""" - query_tokens = tokenize(query) - results = [] - - search_paths = paths if paths else ALLOWED_KB_PATHS - - for search_dir in search_paths: - full_path = os.path.join(REPO_ROOT, search_dir) - - if not os.path.exists(full_path): - continue - - for root, dirs, files in os.walk(full_path): - dirs[:] = [d for d in dirs if not is_excluded(os.path.join(root, d))] - - for filename in files: - if file_glob and not fnmatch.fnmatch(filename, file_glob): - continue - - if not filename.endswith(('.md', '.txt', '.yaml', '.yml', '.json')): - continue - - file_path = os.path.join(root, filename) - - if is_excluded(file_path): - continue - - try: - with open(file_path, 'r', encoding='utf-8', errors='ignore') as f: - content = f.read(100000) - - score = calculate_score(content, query_tokens, file_path) - - if score > 0: - rel_path = os.path.relpath(file_path, REPO_ROOT) - results.append({ - "path": rel_path, - "score": score, - "content": content - }) - except Exception: - continue - - results.sort(key=lambda x: x["score"], reverse=True) - return results[:limit] - - def extract_snippets(content: str, query: str, context_lines: int, max_chars: int) -> list: - """Extract snippets around matches""" - query_tokens = tokenize(query) - lines = content.split('\n') - snippets = [] - - for i, line in enumerate(lines): - line_lower = line.lower() - if any(token in line_lower for token in query_tokens): - start = max(0, i - context_lines) - end = min(len(lines), i + context_lines + 1) - - snippet_lines = lines[start:end] - snippet_text = 
'\n'.join(snippet_lines) - snippet_text = redact_secrets(snippet_text) - - if len(snippet_text) > max_chars: - snippet_text = snippet_text[:max_chars] + "..." - - snippets.append({ - "lines": f"L{start+1}-L{end}", - "text": snippet_text - }) - - if len(snippets) >= 10: - break - - return snippets - - def highlight_matches(text: str, query: str) -> list: - """Generate highlight strings""" - query_tokens = tokenize(query) - highlights = [] - - for token in query_tokens: - pattern = re.compile(re.escape(token), re.IGNORECASE) - for match in pattern.finditer(text): - start = max(0, match.start() - 30) - end = min(len(text), match.end() + 30) - snippet = "..." + text[start:end] + "..." - highlights.append(snippet) - break - - return highlights[:5] - - try: - if action == "search": - query = params.get("query", "") - if not query: - return ToolResult(success=False, result=None, error="query is required for search") - - paths = params.get("paths", []) - file_glob = params.get("file_glob", "**/*.md") - limit = params.get("limit", 20) - - results = search_files(query, paths, file_glob, limit) - - search_results = [] - for r in results: - highlights = highlight_matches(r["content"], query) - rel_path = r["path"] - search_results.append({ - "path": rel_path, - "score": round(r["score"], 2), - "highlights": highlights - }) - - return ToolResult(success=True, result={ - "summary": f"Found {len(search_results)} results for '{query}'", - "results": search_results, - "query": query, - "count": len(search_results) - }) - - elif action == "snippets": - query = params.get("query", "") - if not query: - return ToolResult(success=False, result=None, error="query is required for snippets") - - paths = params.get("paths", []) - limit = params.get("limit", 8) - context_lines = params.get("context_lines", 4) - max_chars = params.get("max_chars_per_snippet", 800) - - search_results = search_files(query, paths, "**/*.md", limit * 2) - - all_snippets = [] - for r in search_results[:limit]: - 
snippets = extract_snippets(r["content"], query, context_lines, max_chars) - for snippet in snippets: - all_snippets.append({ - "path": r["path"], - "score": round(r["score"], 2), - "lines": snippet["lines"], - "text": snippet["text"] - }) - - if len(all_snippets) >= limit * 3: - break - - all_snippets.sort(key=lambda x: x["score"], reverse=True) - all_snippets = all_snippets[:limit] - - return ToolResult(success=True, result={ - "summary": f"Found {len(all_snippets)} snippets for '{query}'", - "results": all_snippets, - "query": query, - "count": len(all_snippets) - }) - - elif action == "open": - file_path = params.get("path") - if not file_path: - return ToolResult(success=False, result=None, error="path is required") - - full_path = os.path.join(REPO_ROOT, file_path) - normalized = os.path.normpath(full_path) - - if ".." in normalized or not normalized.startswith(REPO_ROOT): - return ToolResult(success=False, result=None, error="Path traversal blocked") - - if not is_allowed_path(normalized): - return ToolResult(success=False, result=None, error="Path not in allowed directories") - - if not os.path.exists(normalized): - return ToolResult(success=False, result=None, error="File not found") - - start_line = params.get("start_line", 1) - end_line = params.get("end_line") - max_bytes = params.get("max_bytes", 200000) - - try: - with open(normalized, 'r', encoding='utf-8', errors='ignore') as f: - content = f.read(max_bytes) - - lines = content.split('\n') - - if end_line: - lines = lines[start_line-1:end_line] - else: - lines = lines[start_line-1:] - - content = '\n'.join(lines) - content = redact_secrets(content) - - return ToolResult(success=True, result={ - "path": file_path, - "start_line": start_line, - "end_line": end_line or len(lines) + start_line - 1, - "total_lines": len(lines), - "content": content, - "truncated": len(content) >= max_bytes - }) - except Exception as e: - return ToolResult(success=False, result=None, error=f"Cannot read file: {str(e)}") - 
- elif action == "sources": - paths = params.get("paths", ALLOWED_KB_PATHS) - - indexed_sources = [] - for search_dir in paths: - full_path = os.path.join(REPO_ROOT, search_dir) - - if not os.path.exists(full_path): - continue - - file_count = 0 - for root, dirs, files in os.walk(full_path): - dirs[:] = [d for d in dirs if not is_excluded(os.path.join(root, d))] - for f in files: - if f.endswith(('.md', '.txt', '.yaml', '.yml', '.json')): - file_count += 1 - - indexed_sources.append({ - "path": search_dir, - "file_count": file_count, - "status": "indexed" - }) - - return ToolResult(success=True, result={ - "summary": f"Found {len(indexed_sources)} indexed sources", - "sources": indexed_sources, - "allowed_paths": ALLOWED_KB_PATHS - }) - - else: - return ToolResult(success=False, result=None, error=f"Unknown action: {action}") - - except Exception as e: - logger.error(f"KB tool error: {e}") - return ToolResult(success=False, result=None, error=f"Internal error: {str(e)}")