fix: NODE1_REPAIR - healthchecks, dependencies, SSR env, telegram gateway

TASK_PHASE_NODE1_REPAIR:
- Fix daarion-web SSR: use CITY_API_BASE_URL instead of 127.0.0.1
- Fix auth API routes: use AUTH_API_URL env var
- Add wget to Dockerfiles for healthchecks (stt, ocr, web-search, swapper, vector-db, rag)
- Update healthchecks to use wget instead of curl
- Fix vector-db-service: update torch==2.4.0, sentence-transformers==2.6.1
- Fix rag-service: correct haystack imports for v2.x
- Fix telegram-gateway: remove msg.ack() for non-JetStream NATS
- Add /health endpoint to nginx mvp-routes.conf
- Add room_role, is_public, sort_order columns to city_rooms migration
- Add TASK_PHASE_NODE1_REPAIR.md and DEPLOY_NODE1_REPAIR.md docs

Previous tasks included:
- TASK 039-044: Orchestrator rooms, Matrix chat cleanup, CrewAI integration
This commit is contained in:
Apple
2025-11-29 05:17:08 -08:00
parent 0bab4bba08
commit a6e531a098
69 changed files with 4693 additions and 1310 deletions

View File

@@ -15,6 +15,7 @@ import asyncio
import routes_city
import ws_city
import repo_city
import migrations # Import migrations
from common.redis_client import get_redis, close_redis
from presence_gateway import (
websocket_global_presence,
@@ -26,12 +27,23 @@ from presence_gateway import (
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
from fastapi.staticfiles import StaticFiles
import os
# ... imports ...
app = FastAPI(
title="DAARION City Service",
version="2.0.0",
description="City snapshot aggregator + Rooms + Presence for DAARION ecosystem"
)
# Create static directory if not exists
os.makedirs("static/uploads", exist_ok=True)
# Mount static files
app.mount("/static", StaticFiles(directory="static"), name="static")
# CORS
app.add_middleware(
CORSMiddleware,
@@ -340,6 +352,13 @@ async def startup_event():
except Exception as e:
logger.error(f"❌ Redis connection failed: {e}")
# Run DB Migrations
try:
await migrations.run_migrations()
logger.info("✅ DB Migrations completed")
except Exception as e:
logger.error(f"❌ DB Migrations failed: {e}")
# Background tasks
asyncio.create_task(city_updates_generator())
asyncio.create_task(events_stream_generator())

View File

@@ -0,0 +1,49 @@
import asyncpg
import os
import logging
logger = logging.getLogger(__name__)
DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://postgres:postgres@localhost:5432/daarion")
async def run_migrations():
    """Apply idempotent schema migrations for the city-service tables.

    Each step is an ALTER TABLE using ADD COLUMN IF NOT EXISTS, so running
    this on every startup is safe. Connects directly with asyncpg using
    DATABASE_URL, and re-raises on failure so the caller can log/decide.
    """
    # (sql, success-log) pairs; steps from earlier tasks log nothing.
    steps = (
        (
            """
            ALTER TABLE microdaos
            ADD COLUMN IF NOT EXISTS logo_url TEXT,
            ADD COLUMN IF NOT EXISTS banner_url TEXT;
            """,
            None,  # branding columns on microdaos (previous task)
        ),
        (
            """
            ALTER TABLE city_rooms
            ADD COLUMN IF NOT EXISTS logo_url TEXT,
            ADD COLUMN IF NOT EXISTS banner_url TEXT;
            """,
            None,  # branding columns on city_rooms (previous task)
        ),
        (
            """
            ALTER TABLE agents
            ADD COLUMN IF NOT EXISTS crew_team_key TEXT;
            """,
            "Migration: Added crew_team_key to agents table.",  # TASK 044
        ),
        (
            """
            ALTER TABLE city_rooms
            ADD COLUMN IF NOT EXISTS room_role TEXT,
            ADD COLUMN IF NOT EXISTS is_public BOOLEAN DEFAULT TRUE,
            ADD COLUMN IF NOT EXISTS sort_order INTEGER DEFAULT 100;
            """,
            "Migration: Added room_role, is_public, sort_order to city_rooms table.",  # TASK 044
        ),
    )

    conn = None
    try:
        conn = await asyncpg.connect(DATABASE_URL)
        for sql, message in steps:
            await conn.execute(sql)
            if message:
                logger.info(message)
    except Exception as e:
        logger.error(f"Error running migrations: {e}")
        raise
    finally:
        if conn:
            await conn.close()

View File

@@ -28,6 +28,14 @@ class CityRoomRead(CityRoomBase):
created_by: Optional[str] = None
members_online: int = 0
last_event: Optional[str] = None
# Branding
logo_url: Optional[str] = None
banner_url: Optional[str] = None
# Context
microdao_id: Optional[str] = None
microdao_name: Optional[str] = None
microdao_slug: Optional[str] = None
microdao_logo_url: Optional[str] = None
# Matrix integration
matrix_room_id: Optional[str] = None
matrix_room_alias: Optional[str] = None
@@ -145,6 +153,21 @@ class CityMapResponse(BaseModel):
rooms: List[CityMapRoom]
# =============================================================================
# Branding & Assets
# =============================================================================
class BrandingUpdatePayload(BaseModel):
    """Partial branding update for a MicroDAO or room.

    Fields left as None are treated as "no change" by the repository layer
    (see update_microdao_branding / update_room_branding).
    """
    # URL of the logo image, typically a /static/uploads path.
    logo_url: Optional[str] = None
    # URL of the banner image, typically a /static/uploads path.
    banner_url: Optional[str] = None


class AssetUploadResponse(BaseModel):
    """Response shape for the asset-upload endpoint."""
    # URL of the uploaded file.
    # NOTE(review): the upload handler appears to return the same URL here
    # as in processed_url — confirm whether the raw original is kept.
    original_url: str
    # URL of the processed (resized/cropped) PNG.
    processed_url: str
    # URL of the generated thumbnail, if any.
    thumb_url: Optional[str] = None
# =============================================================================
# Agents (for Agent Presence)
# =============================================================================
@@ -256,6 +279,15 @@ class MicrodaoBadge(BaseModel):
role: Optional[str] = None # orchestrator, member, etc.
is_public: bool = True
is_platform: bool = False
logo_url: Optional[str] = None
banner_url: Optional[str] = None
class AgentCrewInfo(BaseModel):
    """Information about agent's CrewAI team (TASK 044)."""
    # True when the agent has a crew_team_key set in the agents table.
    has_crew_team: bool
    # Key identifying the agent's CrewAI team configuration, if any.
    crew_team_key: Optional[str] = None
    # Matrix room id of the orchestrator team; loaded lazily — list views
    # leave it None to avoid extra lookups.
    matrix_room_id: Optional[str] = None
class AgentSummary(BaseModel):
@@ -292,6 +324,9 @@ class AgentSummary(BaseModel):
# Skills
public_skills: List[str] = []
# CrewAI
crew_info: Optional[AgentCrewInfo] = None
# Future: model bindings and usage stats
model_bindings: Optional[ModelBindings] = None
usage_stats: Optional[UsageStats] = None
@@ -400,6 +435,7 @@ class MicrodaoSummary(BaseModel):
# Stats
logo_url: Optional[str] = None
banner_url: Optional[str] = None
member_count: int = 0 # alias for agents_count
agents_count: int = 0 # backward compatibility
room_count: int = 0 # alias for rooms_count
@@ -431,9 +467,11 @@ class CityRoomSummary(BaseModel):
matrix_room_id: Optional[str] = None
microdao_id: Optional[str] = None
microdao_slug: Optional[str] = None
room_role: Optional[str] = None # 'primary', 'lobby', 'team', 'research', 'security', 'governance'
room_role: Optional[str] = None # 'primary', 'lobby', 'team', 'research', 'security', 'governance', 'orchestrator_team'
is_public: bool = True
sort_order: int = 100
logo_url: Optional[str] = None
banner_url: Optional[str] = None
class MicrodaoRoomsList(BaseModel):
@@ -483,6 +521,7 @@ class MicrodaoDetail(BaseModel):
# Content
logo_url: Optional[str] = None
banner_url: Optional[str] = None
agents: List[MicrodaoAgentView] = []
channels: List[MicrodaoChannelView] = []
@@ -498,6 +537,7 @@ class AgentMicrodaoMembership(BaseModel):
microdao_id: str
microdao_slug: str
microdao_name: str
logo_url: Optional[str] = None
role: Optional[str] = None
is_core: bool = False
@@ -534,4 +574,3 @@ class MicrodaoCreateRequest(BaseModel):
make_platform: bool = False # If true -> is_platform = true
is_public: bool = True
parent_microdao_id: Optional[str] = None

View File

@@ -7,11 +7,15 @@ import asyncpg
from typing import Optional, List, Dict, Any, Tuple
from datetime import datetime
import secrets
import httpx
import logging
logger = logging.getLogger(__name__)
# Database connection
_pool: Optional[asyncpg.Pool] = None
MATRIX_GATEWAY_URL = os.getenv("MATRIX_GATEWAY_URL", "http://matrix-gateway:8000")
async def get_pool() -> asyncpg.Pool:
"""Отримати connection pool"""
@@ -62,7 +66,7 @@ async def get_all_rooms(limit: int = 100, offset: int = 0) -> List[dict]:
query = """
SELECT id, slug, name, description, is_default, created_at, created_by,
matrix_room_id, matrix_room_alias
matrix_room_id, matrix_room_alias, logo_url, banner_url
FROM city_rooms
ORDER BY is_default DESC, created_at DESC
LIMIT $1 OFFSET $2
@@ -77,10 +81,13 @@ async def get_room_by_id(room_id: str) -> Optional[dict]:
pool = await get_pool()
query = """
SELECT id, slug, name, description, is_default, created_at, created_by,
matrix_room_id, matrix_room_alias
FROM city_rooms
WHERE id = $1
SELECT
cr.id, cr.slug, cr.name, cr.description, cr.is_default, cr.created_at, cr.created_by,
cr.matrix_room_id, cr.matrix_room_alias, cr.logo_url, cr.banner_url,
cr.microdao_id, m.name AS microdao_name, m.slug AS microdao_slug, m.logo_url AS microdao_logo_url
FROM city_rooms cr
LEFT JOIN microdaos m ON cr.microdao_id = m.id
WHERE cr.id = $1
"""
row = await pool.fetchrow(query, room_id)
@@ -92,10 +99,13 @@ async def get_room_by_slug(slug: str) -> Optional[dict]:
pool = await get_pool()
query = """
SELECT id, slug, name, description, is_default, created_at, created_by,
matrix_room_id, matrix_room_alias
FROM city_rooms
WHERE slug = $1
SELECT
cr.id, cr.slug, cr.name, cr.description, cr.is_default, cr.created_at, cr.created_by,
cr.matrix_room_id, cr.matrix_room_alias, cr.logo_url, cr.banner_url,
cr.microdao_id, m.name AS microdao_name, m.slug AS microdao_slug, m.logo_url AS microdao_logo_url
FROM city_rooms cr
LEFT JOIN microdaos m ON cr.microdao_id = m.id
WHERE cr.slug = $1
"""
row = await pool.fetchrow(query, slug)
@@ -137,7 +147,7 @@ async def update_room_matrix(room_id: str, matrix_room_id: str, matrix_room_alia
"""
row = await pool.fetchrow(query, room_id, matrix_room_id, matrix_room_alias)
return dict(row) if row else None
return dict(row)
async def get_rooms_without_matrix() -> List[dict]:
@@ -375,6 +385,7 @@ async def list_agent_summaries(
pm.slug AS primary_microdao_slug,
pm.district AS district,
COALESCE(a.public_skills, ARRAY[]::text[]) AS public_skills,
a.crew_team_key,
COUNT(*) OVER() AS total_count
FROM agents a
LEFT JOIN node_cache nc ON a.node_id = nc.node_id
@@ -429,6 +440,22 @@ async def list_agent_summaries(
data["public_skills"] = list(data.get("public_skills") or [])
# Populate crew_info
if data.get("crew_team_key"):
# Try to find orchestrator team room for their primary microdao
# This is a bit expensive for list view, so maybe just return basic info
data["crew_info"] = {
"has_crew_team": True,
"crew_team_key": data["crew_team_key"],
"matrix_room_id": None # Loaded lazily if needed
}
else:
data["crew_info"] = {
"has_crew_team": False,
"crew_team_key": None,
"matrix_room_id": None
}
items.append(data)
return items, total
@@ -499,9 +526,9 @@ async def update_agent_visibility_legacy(
query = """
UPDATE agents
SET visibility_scope = $2,
is_listed_in_directory = $3,
is_public = $3,
updated_at = NOW()
is_listed_in_directory = $3,
is_public = $3,
updated_at = NOW()
WHERE id = $1
AND COALESCE(is_archived, false) = false
RETURNING id
@@ -747,7 +774,8 @@ async def get_agent_by_id(agent_id: str) -> Optional[dict]:
a.public_skills,
a.public_slug,
a.is_public,
a.district AS home_district
a.district AS home_district,
a.crew_team_key
FROM agents a
WHERE a.id = $1
"""
@@ -760,6 +788,27 @@ async def get_agent_by_id(agent_id: str) -> Optional[dict]:
agent["capabilities"] = _normalize_capabilities(agent.get("capabilities"))
if agent.get("public_skills") is None:
agent["public_skills"] = []
# Populate crew_info
if agent.get("crew_team_key"):
agent["crew_info"] = {
"has_crew_team": True,
"crew_team_key": agent["crew_team_key"],
"matrix_room_id": None # Populated later if needed
}
# If orchestrator, verify if room exists
# For detailed view, let's try to fetch it
if agent.get("primary_room_slug"):
# Just a placeholder check, logic should be outside or specific method
pass
else:
agent["crew_info"] = {
"has_crew_team": False,
"crew_team_key": None,
"matrix_room_id": None
}
return agent
@@ -1236,6 +1285,7 @@ async def get_agent_microdao_memberships(agent_id: str) -> List[dict]:
ma.microdao_id,
m.slug AS microdao_slug,
m.name AS microdao_name,
m.logo_url,
ma.role,
ma.is_core
FROM microdao_agents ma
@@ -1336,6 +1386,7 @@ async def get_microdaos(district: Optional[str] = None, q: Optional[str] = None,
m.parent_microdao_id,
pm.slug as parent_microdao_slug,
m.logo_url,
m.banner_url,
COUNT(DISTINCT ma.agent_id) AS agents_count,
COUNT(DISTINCT ma.agent_id) AS member_count,
COUNT(DISTINCT mc.id) AS channels_count,
@@ -1416,6 +1467,7 @@ async def list_microdao_summaries(
m.parent_microdao_id,
pm.slug as parent_microdao_slug,
m.logo_url,
m.banner_url,
COUNT(DISTINCT ma.agent_id) AS agents_count,
COUNT(DISTINCT ma.agent_id) AS member_count,
COUNT(DISTINCT mc.id) AS channels_count,
@@ -1466,7 +1518,8 @@ async def get_microdao_by_slug(slug: str) -> Optional[dict]:
COALESCE(m.is_platform, false) as is_platform,
m.parent_microdao_id,
pm.slug as parent_microdao_slug,
m.logo_url
m.logo_url,
m.banner_url
FROM microdaos m
LEFT JOIN agents a ON COALESCE(m.orchestrator_agent_id, m.owner_agent_id) = a.id
LEFT JOIN microdaos pm ON m.parent_microdao_id = pm.id
@@ -1535,6 +1588,66 @@ async def get_microdao_by_slug(slug: str) -> Optional[dict]:
return result
async def update_microdao_branding(
    microdao_slug: str,
    logo_url: Optional[str] = None,
    banner_url: Optional[str] = None
) -> Optional[dict]:
    """Update a MicroDAO's branding (logo/banner) by slug.

    Only non-None fields are written; updated_at is always refreshed.
    Returns the updated row as a dict, or None when the slug is unknown.
    """
    pool = await get_pool()

    # Build the SET clause dynamically; $1 is reserved for the slug.
    assignments = ["updated_at = NOW()"]
    values: list = [microdao_slug]
    for column, value in (("logo_url", logo_url), ("banner_url", banner_url)):
        if value is not None:
            values.append(value)
            assignments.append(f"{column} = ${len(values)}")

    query = f"""
        UPDATE microdaos
        SET {', '.join(assignments)}
        WHERE slug = $1
        RETURNING id, slug, name, logo_url, banner_url
    """
    row = await pool.fetchrow(query, *values)
    return dict(row) if row else None
async def update_room_branding(
    room_id: str,
    logo_url: Optional[str] = None,
    banner_url: Optional[str] = None
) -> Optional[dict]:
    """Update a city room's branding (logo/banner) by id.

    Only non-None fields are written; updated_at is always refreshed.
    Returns the updated row as a dict, or None when the id is unknown.
    """
    pool = await get_pool()

    # Build the SET clause dynamically; $1 is reserved for the room id.
    assignments = ["updated_at = NOW()"]
    values: list = [room_id]
    for column, value in (("logo_url", logo_url), ("banner_url", banner_url)):
        if value is not None:
            values.append(value)
            assignments.append(f"{column} = ${len(values)}")

    query = f"""
        UPDATE city_rooms
        SET {', '.join(assignments)}
        WHERE id = $1
        RETURNING id, slug, name, logo_url, banner_url
    """
    row = await pool.fetchrow(query, *values)
    return dict(row) if row else None
# =============================================================================
# Nodes Repository
# =============================================================================
@@ -1869,6 +1982,8 @@ async def get_microdao_rooms(microdao_id: str) -> List[dict]:
cr.room_role,
cr.is_public,
cr.sort_order,
cr.logo_url,
cr.banner_url,
m.slug AS microdao_slug
FROM city_rooms cr
LEFT JOIN microdaos m ON cr.microdao_id = m.id
@@ -1890,7 +2005,9 @@ async def get_microdao_rooms(microdao_id: str) -> List[dict]:
"microdao_slug": row.get("microdao_slug"),
"room_role": row.get("room_role"),
"is_public": row.get("is_public", True),
"sort_order": row.get("sort_order", 100)
"sort_order": row.get("sort_order", 100),
"logo_url": row.get("logo_url"),
"banner_url": row.get("banner_url")
}
for row in rows
]
@@ -1942,7 +2059,7 @@ async def attach_room_to_microdao(
is_public = $3,
sort_order = $4
WHERE id = $5
RETURNING id, slug, name, matrix_room_id, microdao_id, room_role, is_public, sort_order
RETURNING id, slug, name, matrix_room_id, microdao_id, room_role, is_public, sort_order, logo_url, banner_url
"""
row = await pool.fetchrow(query, microdao_id, room_role, is_public, sort_order, room_id)
@@ -1955,7 +2072,9 @@ async def attach_room_to_microdao(
"microdao_id": str(row["microdao_id"]) if row.get("microdao_id") else None,
"room_role": row.get("room_role"),
"is_public": row.get("is_public", True),
"sort_order": row.get("sort_order", 100)
"sort_order": row.get("sort_order", 100),
"logo_url": row.get("logo_url"),
"banner_url": row.get("banner_url")
}
return None
@@ -2019,7 +2138,7 @@ async def update_microdao_room(
UPDATE city_rooms
SET {', '.join(set_parts)}
WHERE id = $1 AND microdao_id = $2
RETURNING id, slug, name, matrix_room_id, microdao_id, room_role, is_public, sort_order
RETURNING id, slug, name, matrix_room_id, microdao_id, room_role, is_public, sort_order, logo_url, banner_url
"""
row = await conn.fetchrow(query, *params)
@@ -2032,7 +2151,146 @@ async def update_microdao_room(
"microdao_id": str(row["microdao_id"]) if row.get("microdao_id") else None,
"room_role": row.get("room_role"),
"is_public": row.get("is_public", True),
"sort_order": row.get("sort_order", 100)
"sort_order": row.get("sort_order", 100),
"logo_url": row.get("logo_url"),
"banner_url": row.get("banner_url")
}
return None
# =============================================================================
# TASK 044: Orchestrator Crew Team Room
# =============================================================================
async def create_matrix_room_for_microdao_orchestrator(
    microdao_id: str,
    microdao_name: str,
    orchestrator_agent_id: str
) -> Optional[dict]:
    """Ask the Matrix Gateway to create the orchestrator-team room.

    Calls POST {MATRIX_GATEWAY_URL}/internal/matrix/rooms and returns
    {"room_id", "room_alias"} on success, or None on any failure (the
    error is logged, never raised).

    NOTE(review): orchestrator_agent_id is currently unused — the gateway's
    internal endpoint is trusted to act as a system/bot user, and this call
    carries no authentication; confirm network-level protection.
    """
    # Alias derives from the microdao id prefix, so it is stable per DAO.
    room_alias = f"orchestrator_team_{microdao_id[:8]}"
    payload = {
        "alias": room_alias,
        "name": f"{microdao_name} — Orchestrator Team",
        "topic": "Private team chat for MicroDAO Orchestrator",
        "preset": "private_chat",  # team chat is private by default
        "initial_state": []
    }

    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            resp = await client.post(
                f"{MATRIX_GATEWAY_URL}/internal/matrix/rooms",
                json=payload,
            )
            if resp.status_code not in (200, 201):
                logger.error(f"Matrix Gateway failed to create room: {resp.text}")
                return None

            data = resp.json()
            return {
                "room_id": data["room_id"],
                "room_alias": data.get("room_alias", room_alias),
            }
        except Exception as e:
            logger.error(f"Failed to create matrix room via gateway: {e}")
            return None
async def get_or_create_orchestrator_team_room(microdao_id: str) -> Optional[dict]:
    """Find or create the orchestrator-team room for a MicroDAO.

    Flow: return the existing room if one is recorded; otherwise look up the
    MicroDAO, create a Matrix room via the gateway, then insert a city_rooms
    record with room_role='orchestrator_team'.

    Returns the room row as a dict, or None when the MicroDAO is missing,
    has no orchestrator, or Matrix/DB creation fails.
    """
    pool = await get_pool()

    # 1. Check if the room already exists in the DB.
    existing_room_query = """
        SELECT
            cr.id, cr.slug, cr.name, cr.matrix_room_id, cr.microdao_id,
            cr.room_role, cr.is_public, cr.sort_order
        FROM city_rooms cr
        WHERE cr.microdao_id = $1 AND cr.room_role = 'orchestrator_team'
        LIMIT 1
    """
    room_row = await pool.fetchrow(existing_room_query, microdao_id)
    if room_row:
        return dict(room_row)

    # 2. Fetch MicroDAO details needed to create the room.
    microdao = await pool.fetchrow(
        """
        SELECT id, name, slug, orchestrator_agent_id
        FROM microdaos
        WHERE id = $1
        """,
        microdao_id,
    )
    if not microdao or not microdao["orchestrator_agent_id"]:
        logger.warning(f"MicroDAO {microdao_id} not found or has no orchestrator")
        return None

    # 3. Create the Matrix room via the gateway; a Matrix id is required
    #    for this feature, so fail rather than record a half-made room.
    matrix_info = await create_matrix_room_for_microdao_orchestrator(
        microdao_id=microdao_id,
        microdao_name=microdao["name"],
        orchestrator_agent_id=microdao["orchestrator_agent_id"],
    )
    if not matrix_info:
        logger.error("Failed to create Matrix room for orchestrator team")
        return None

    # 4. Pick a unique slug. Fix: always derive candidates from the base
    #    slug, so repeated collisions don't compound suffixes endlessly
    #    ("x-team-ab-cd-ef" ...).
    base_slug = f"{microdao['slug']}-team"
    slug = base_slug
    while await pool.fetchrow("SELECT 1 FROM city_rooms WHERE slug = $1", slug):
        slug = f"{base_slug}-{secrets.token_hex(2)}"

    create_query = """
        INSERT INTO city_rooms (
            id, slug, name, description, created_by,
            matrix_room_id, matrix_room_alias,
            microdao_id, room_role, is_public, sort_order
        )
        VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11)
        RETURNING id, slug, name, matrix_room_id, microdao_id, room_role, is_public, sort_order
    """
    room_id = f"room_city_{slug}"
    new_room = await pool.fetchrow(
        create_query,
        room_id,
        slug,
        f"{microdao['name']} Team",
        "Orchestrator Team Chat",
        "system",
        matrix_info["room_id"],
        matrix_info.get("room_alias"),
        microdao_id,
        "orchestrator_team",
        False,  # private by default
        50,     # sort order (high priority)
    )
    # Fix: guard against a failed INSERT instead of crashing on dict(None).
    if not new_room:
        logger.error("Failed to insert orchestrator team room record")
        return None
    return dict(new_room)

View File

@@ -7,3 +7,5 @@ websockets==12.0
requests==2.31.0
httpx==0.26.0
nats-py==2.6.0
Pillow==10.2.0
python-multipart==0.0.9

View File

@@ -2,12 +2,16 @@
City Backend API Routes
"""
from fastapi import APIRouter, HTTPException, Depends, Body, Header, Query, Request
from fastapi import APIRouter, HTTPException, Depends, Body, Header, Query, Request, UploadFile, File, Form
from pydantic import BaseModel
from typing import List, Optional
import logging
import httpx
import os
import io
import uuid
from PIL import Image
import shutil
from models_city import (
CityRoomRead,
@@ -53,6 +57,56 @@ logger = logging.getLogger(__name__)
AUTH_SERVICE_URL = os.getenv("AUTH_SERVICE_URL", "http://daarion-auth:7020")
MATRIX_GATEWAY_URL = os.getenv("MATRIX_GATEWAY_URL", "http://daarion-matrix-gateway:7025")
# Helper for image processing
def process_image(image_bytes: bytes, target_size: tuple = (256, 256)) -> tuple[bytes, bytes]:
    """Process an uploaded image into a fixed-size PNG plus a thumbnail.

    1. Normalize the color mode so the result saves cleanly as PNG.
    2. Cover-resize then center-crop to target_size (default 256x256).
    3. Generate a thumbnail (max 128x128) from the cropped image.

    Args:
        image_bytes: raw bytes of the uploaded image (any PIL-readable format).
        target_size: (width, height) of the processed output.

    Returns:
        (processed_bytes, thumb_bytes) — both PNG-encoded.

    Raises:
        PIL.UnidentifiedImageError / OSError on invalid image data.
    """
    with Image.open(io.BytesIO(image_bytes)) as img:
        # Fix: palette images may carry transparency -> RGBA; CMYK has no
        # alpha channel, so RGB is the correct target (the old code sent
        # CMYK to RGBA). Any other exotic mode is normalized to RGB so the
        # PNG save below cannot fail on an unsupported mode.
        if img.mode == 'P':
            img = img.convert('RGBA')
        elif img.mode not in ('RGB', 'RGBA', 'L', 'LA'):
            img = img.convert('RGB')

        # Scale so the image fully covers target_size (aspect preserved).
        img_ratio = img.width / img.height
        target_ratio = target_size[0] / target_size[1]
        if img_ratio > target_ratio:
            # Wider than target: match height, let width overflow.
            new_height = target_size[1]
            new_width = int(new_height * img_ratio)
        else:
            # Taller than (or equal to) target: match width.
            new_width = target_size[0]
            new_height = int(new_width / img_ratio)
        img = img.resize((new_width, new_height), Image.Resampling.LANCZOS)

        # Center crop. Fix: crop boxes must be integers; the old float
        # arithmetic relied on PIL's implicit rounding.
        left = (new_width - target_size[0]) // 2
        top = (new_height - target_size[1]) // 2
        img = img.crop((left, top, left + target_size[0], top + target_size[1]))

        # Encode the processed image.
        processed_io = io.BytesIO()
        img.save(processed_io, format='PNG', optimize=True)
        processed_bytes = processed_io.getvalue()

        # Thumbnail is derived from the already-cropped image (in place).
        img.thumbnail((128, 128))
        thumb_io = io.BytesIO()
        img.save(thumb_io, format='PNG', optimize=True)
        thumb_bytes = thumb_io.getvalue()

        return processed_bytes, thumb_bytes
router = APIRouter(prefix="/city", tags=["city"])
public_router = APIRouter(prefix="/public", tags=["public"])
api_router = APIRouter(prefix="/api/v1", tags=["api_v1"])
@@ -151,8 +205,11 @@ async def list_agents(
class AgentVisibilityPayload(BaseModel):
"""Agent visibility update payload (Task 029)"""
"""Agent visibility update payload (Task 039)"""
is_public: bool
public_title: Optional[str] = None
public_tagline: Optional[str] = None
public_slug: Optional[str] = None
visibility_scope: Optional[str] = None # 'global' | 'microdao' | 'private'
@@ -161,7 +218,7 @@ async def update_agent_visibility_endpoint(
agent_id: str,
payload: AgentVisibilityPayload
):
"""Оновити налаштування видимості агента (Task 029)"""
"""Оновити налаштування видимості агента (Task 039)"""
try:
# Validate visibility_scope if provided
valid_scopes = ("global", "microdao", "private", "city", "owner_only") # support legacy too
@@ -177,23 +234,58 @@ async def update_agent_visibility_endpoint(
scope = "global"
elif scope == "owner_only":
scope = "private"
# Validate: if is_public, slug is required
if payload.is_public and not payload.public_slug:
# If slug is missing but we have agent_id, maybe we can't generate it here safely without duplicate check?
# The prompt says "якщо is_public = true → slug обовʼязковий"
# But let's see if we can fetch existing slug if not provided?
# For now, enforce requirement as per prompt
raise HTTPException(status_code=400, detail="public_slug is required when is_public is true")
# Validate slug format if provided
if payload.public_slug:
import re
if not re.match(r'^[a-z0-9_-]+$', payload.public_slug.lower()):
raise HTTPException(status_code=400, detail="public_slug must contain only lowercase letters, numbers, underscores, and hyphens")
# Use unified update_agent_public_profile
# We need to fetch existing values for fields not provided?
# repo_city.update_agent_public_profile replaces values.
# So we should probably fetch the agent first to preserve other fields if they are None in payload?
# But the payload fields are optional.
# Let's assume if they are passed as None, we might want to keep them or clear them?
# Usually PUT replaces. PATCH updates. This is PUT.
# But let's be safe and fetch current profile to merge if needed, or just update what we have.
# Actually, update_agent_public_profile updates all passed fields.
# Let's fetch current to ensure we don't wipe out existing data if payload sends None but we want to keep it.
# However, the prompt implies these are the fields to update.
# Update in database
result = await repo_city.update_agent_visibility(
current_profile = await repo_city.get_agent_public_profile(agent_id)
if not current_profile:
raise HTTPException(status_code=404, detail="Agent not found")
result = await repo_city.update_agent_public_profile(
agent_id=agent_id,
is_public=payload.is_public,
visibility_scope=scope,
public_slug=payload.public_slug or current_profile.get("public_slug"),
public_title=payload.public_title if payload.public_title is not None else current_profile.get("public_title"),
public_tagline=payload.public_tagline if payload.public_tagline is not None else current_profile.get("public_tagline"),
public_skills=current_profile.get("public_skills"), # Preserve skills
public_district=current_profile.get("public_district"), # Preserve district
public_primary_room_slug=current_profile.get("public_primary_room_slug") # Preserve room
)
if not result:
raise HTTPException(status_code=404, detail="Agent not found")
# Also update visibility_scope if provided
if scope:
await repo_city.update_agent_visibility(
agent_id=agent_id,
is_public=payload.is_public,
visibility_scope=scope
)
result["visibility_scope"] = scope
return {
"status": "ok",
"agent_id": agent_id,
"is_public": result.get("is_public"),
"visibility_scope": result.get("visibility_scope"),
}
return result
except HTTPException:
raise
except Exception as e:
@@ -201,10 +293,208 @@ async def update_agent_visibility_endpoint(
raise HTTPException(status_code=500, detail="Failed to update visibility")
# =============================================================================
# Assets & Branding API (Task 042)
# =============================================================================
@router.post("/assets/upload")
async def upload_asset(
    file: UploadFile = File(...),
    type: str = Form(...)  # microdao_logo, microdao_banner, room_logo, room_banner
):
    """Upload an asset (logo/banner) with auto-processing.

    Validates the asset type and size (max 5MB), cover-crops the image to
    the target dimensions, writes the PNG + thumbnail under static/uploads,
    and returns their public URLs.
    """
    try:
        # Validate type against the known asset kinds.
        if type not in ('microdao_logo', 'microdao_banner', 'room_logo', 'room_banner'):
            raise HTTPException(status_code=400, detail="Invalid asset type")

        # Size limit enforced by reading the whole payload (max 5MB).
        content = await file.read()
        if len(content) > 5 * 1024 * 1024:
            raise HTTPException(status_code=400, detail="File too large (max 5MB)")

        # Logos are square; banners use a wide standard size.
        target_size = (1200, 400) if 'banner' in type else (256, 256)
        processed_bytes, thumb_bytes = process_image(content, target_size=target_size)

        # Fix: interpolate the generated filename into the paths/URLs
        # (previously the placeholders were mangled and every upload
        # collided on the same literal path).
        filename = f"{uuid.uuid4()}.png"
        filepath = f"static/uploads/{filename}"
        thumb_filepath = f"static/uploads/thumb_{filename}"
        with open(filepath, "wb") as f:
            f.write(processed_bytes)
        with open(thumb_filepath, "wb") as f:
            f.write(thumb_bytes)

        base_url = "/static/uploads"
        return {
            "original_url": f"{base_url}/{filename}",
            "processed_url": f"{base_url}/{filename}",
            "thumb_url": f"{base_url}/thumb_{filename}"
        }
    except HTTPException:
        # Fix: re-raise client errors instead of letting the generic
        # handler below convert the 400s above into a 500.
        raise
    except Exception as e:
        logger.error(f"Upload failed: {e}")
        raise HTTPException(status_code=500, detail="Upload failed")
class BrandingUpdatePayload(BaseModel):
    """Request body for the branding PATCH endpoints.

    Fields left as None are passed through to the repository layer, which
    skips them rather than clearing the stored value.
    """
    # URL of the logo image.
    logo_url: Optional[str] = None
    # URL of the banner image.
    banner_url: Optional[str] = None
@router.patch("/microdao/{slug}/branding")
async def update_microdao_branding_endpoint(slug: str, payload: BrandingUpdatePayload):
    """Update a MicroDAO's branding (logo/banner) by slug.

    404 when the slug is unknown; 500 on unexpected repository errors.
    """
    try:
        # Verify the MicroDAO exists before attempting the update.
        existing = await repo_city.get_microdao_by_slug(slug)
        if existing is None:
            raise HTTPException(status_code=404, detail="MicroDAO not found")

        return await repo_city.update_microdao_branding(
            microdao_slug=slug,
            logo_url=payload.logo_url,
            banner_url=payload.banner_url,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to update branding: {e}")
        raise HTTPException(status_code=500, detail="Failed to update branding")
@router.patch("/rooms/{room_id}/branding")
async def update_room_branding_endpoint(room_id: str, payload: BrandingUpdatePayload):
    """Update a city room's branding (logo/banner) by id.

    404 when the room id is unknown; 500 on unexpected repository errors.
    """
    try:
        # Verify the room exists before attempting the update.
        existing = await repo_city.get_room_by_id(room_id)
        if existing is None:
            raise HTTPException(status_code=404, detail="Room not found")

        return await repo_city.update_room_branding(
            room_id=room_id,
            logo_url=payload.logo_url,
            banner_url=payload.banner_url,
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to update room branding: {e}")
        raise HTTPException(status_code=500, detail="Failed to update room branding")
# =============================================================================
# Nodes API (for Node Directory)
# =============================================================================
@public_router.get("/nodes/join/instructions")
async def get_node_join_instructions():
    """Return onboarding instructions for connecting a new node.

    The content is a static Ukrainian-language markdown document, served
    as {"content": <markdown str>} for the frontend to render on the
    Nodes page. The literal below is runtime data — do not reformat it.
    """
    instructions = """
# Як підключити нову ноду до DAARION
Вітаємо! Ви вирішили розширити обчислювальну потужність мережі DAARION.
Цей гайд допоможе вам розгорнути власну ноду та підключити її до кластера.
## Вимоги до заліза (Мінімальні)
- **CPU**: 4 cores
- **RAM**: 16 GB (рекомендовано 32+ GB для LLM)
- **Disk**: 100 GB SSD
- **OS**: Ubuntu 22.04 LTS / Debian 11+
- **Network**: Статична IP адреса, відкриті порти
## Крок 1: Підготовка сервера
Встановіть Docker та Docker Compose:
```bash
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
```
## Крок 2: Отримання токенів
Для підключення вам знадобляться:
1. **NATS Connection URL** (від адміністратора)
2. **NATS Credentials File** (`.creds`) (від адміністратора)
Зверніться до адміністраторів мережі у [Discord/Matrix], щоб отримати доступ.
## Крок 3: Розгортання Node Runtime
Створіть директорію `daarion-node` та файл `docker-compose.yml`:
```yaml
version: '3.8'
services:
  # 1. NATS Leaf Node (міст до ядра)
  nats-leaf:
    image: nats:2.10-alpine
    volumes:
      - ./nats.conf:/etc/nats/nats.conf
      - ./creds:/etc/nats/creds
    ports:
      - "4222:4222"
  # 2. Node Registry (реєстрація в мережі)
  node-registry:
    image: daarion/node-registry:latest
    environment:
      - NODE_ID=my-node-01 # Змініть на унікальне ім'я
      - NATS_URL=nats://nats-leaf:4222
      - REGION=eu-central
    depends_on:
      - nats-leaf
  # 3. Ollama (AI Runtime)
  ollama:
    image: ollama/ollama:latest
    volumes:
      - ollama_data:/root/.ollama
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: 1
              capabilities: [gpu]
volumes:
  ollama_data:
```
## Крок 4: Запуск
```bash
docker compose up -d
```
## Крок 5: Перевірка
Перейдіть у консоль **Nodes** на https://app.daarion.space/nodes.
Ваша нода має з'явитися у списку зі статусом **Online**.
"""
    return {"content": instructions}
@public_router.get("/nodes")
async def list_nodes():
"""Список всіх нод мережі"""
@@ -1181,6 +1471,7 @@ async def get_agent_dashboard(agent_id: str):
"primary_microdao_id": agent.get("primary_microdao_id"),
"primary_microdao_name": agent.get("primary_microdao_name"),
"primary_microdao_slug": agent.get("primary_microdao_slug"),
"crew_info": agent.get("crew_info"),
"roles": [agent.get("role")] if agent.get("role") else [],
"tags": [],
"dais": {
@@ -1209,13 +1500,27 @@ async def get_agent_dashboard(agent_id: str):
}
}
# Get node info (simplified)
# Get node info (detailed)
node_info = None
if agent.get("node_id"):
node_info = {
"node_id": agent["node_id"],
"status": "online" # Would fetch from Node Registry in production
}
node_data = await repo_city.get_node_by_id(agent["node_id"])
if node_data:
node_info = {
"node_id": node_data["node_id"],
"name": node_data["name"],
"hostname": node_data.get("hostname"),
"roles": node_data.get("roles", []),
"environment": node_data.get("environment"),
"status": node_data.get("status", "offline"),
"guardian_agent": node_data.get("guardian_agent"),
"steward_agent": node_data.get("steward_agent")
}
else:
node_info = {
"node_id": agent["node_id"],
"status": "unknown",
"name": "Unknown Node"
}
# Get system prompts
system_prompts = await repo_city.get_agent_prompts(agent_id)
@@ -1230,6 +1535,7 @@ async def get_agent_dashboard(agent_id: str):
microdao_id=item["microdao_id"],
microdao_slug=item.get("microdao_slug"),
microdao_name=item.get("microdao_name"),
logo_url=item.get("logo_url"),
role=item.get("role"),
is_core=item.get("is_core", False)
)
@@ -1750,6 +2056,7 @@ class MicrodaoCreatePayload(BaseModel):
make_platform: bool = False
is_public: bool = True
parent_microdao_id: Optional[str] = None
create_rooms: Optional[dict] = None # {"primary_lobby": bool, "governance": bool, "crew_team": bool}
@router.post("/agents/{agent_id}/microdao", response_model=dict)
@@ -1765,6 +2072,7 @@ async def create_microdao_for_agent_endpoint(
2. Призначає агента оркестратором
3. Додає агента як члена DAO
4. Встановлює primary_microdao_id якщо порожній
5. Опціонально створює кімнати (primary/governance/crew)
"""
try:
# Check if agent exists and is not archived
@@ -1790,11 +2098,72 @@ async def create_microdao_for_agent_endpoint(
if not result:
raise HTTPException(status_code=500, detail="Failed to create MicroDAO")
microdao_id = result["id"]
created_rooms = []
# Create Rooms if requested
if payload.create_rooms:
# Helper to create room
async def create_room_helper(room_slug: str, name: str, role: str, is_public: bool = True):
# Create room
matrix_room_id, matrix_room_alias = await create_matrix_room(
slug=room_slug,
name=name,
visibility="public" if is_public else "private"
)
room = await repo_city.create_room(
slug=room_slug,
name=name,
description=f"{role.title()} room for {payload.name}",
created_by="u_system", # TODO: use actual user if available
matrix_room_id=matrix_room_id,
matrix_room_alias=matrix_room_alias
)
# Attach to MicroDAO
attached = await repo_city.attach_room_to_microdao(
microdao_id=microdao_id,
room_id=room["id"],
room_role=role,
is_public=is_public,
sort_order=10 if role == 'primary' else 50
)
created_rooms.append(attached)
# Primary Lobby
if payload.create_rooms.get("primary_lobby"):
await create_room_helper(
room_slug=f"{payload.slug}-lobby",
name=f"{payload.name} Lobby",
role="primary",
is_public=True
)
# Governance
if payload.create_rooms.get("governance"):
await create_room_helper(
room_slug=f"{payload.slug}-gov",
name=f"{payload.name} Governance",
role="governance",
is_public=True
)
# Crew Team
if payload.create_rooms.get("crew_team"):
await create_room_helper(
room_slug=f"{payload.slug}-team",
name=f"{payload.name} Team",
role="team",
is_public=False # Team rooms usually private/internal
)
return {
"status": "ok",
"microdao": result,
"agent_id": agent_id,
"created_rooms": created_rooms
}
except HTTPException:
raise
@@ -1804,3 +2173,114 @@ async def create_microdao_for_agent_endpoint(
traceback.print_exc()
raise HTTPException(status_code=500, detail="Failed to create MicroDAO")
class AttachToMicrodaoPayload(BaseModel):
    """Request body for attaching an agent to an existing MicroDAO."""

    # ID of the agent to attach to the MicroDAO.
    agent_id: str
    # Membership role; known values: "orchestrator" | "member".
    # "orchestrator" additionally marks the membership as core downstream.
    role: str = "member"  # orchestrator | member
@router.post("/microdao/{slug}/attach-agent")
async def attach_agent_to_microdao_endpoint(
    slug: str,
    payload: AttachToMicrodaoPayload
):
    """Attach an agent to an existing MicroDAO (Task 040).

    Resolves the MicroDAO by slug and the agent by id, then upserts the
    membership row. An "orchestrator" role is also flagged as a core member.

    Raises:
        HTTPException 404: unknown MicroDAO slug or agent id.
        HTTPException 500: any unexpected repository failure.
    """
    try:
        # Resolve both sides of the relation first; fail fast with 404s.
        dao = await repo_city.get_microdao_by_slug(slug)
        if not dao:
            raise HTTPException(status_code=404, detail=f"MicroDAO not found: {slug}")

        agent = await repo_city.get_agent_by_id(payload.agent_id)
        if not agent:
            raise HTTPException(status_code=404, detail="Agent not found")

        # Orchestrators are always treated as core members.
        is_core = payload.role == "orchestrator"

        membership = await repo_city.upsert_agent_microdao_membership(
            agent_id=payload.agent_id,
            microdao_id=dao["id"],
            role=payload.role,
            is_core=is_core
        )

        # NOTE(review): repo_city.create_microdao_for_agent also stores
        # orchestrator_agent_id on the microdao row itself.  We deliberately
        # leave that column untouched here — membership rows are the source
        # of truth for the "orchestrators" list.
        if payload.role == "orchestrator" and not dao.get("orchestrator_agent_id"):
            # TODO: Update microdao table orchestrator_agent_id = payload.agent_id
            pass

        return membership
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to attach agent to microdao: {e}")
        raise HTTPException(status_code=500, detail="Failed to attach agent")
@router.post("/microdao/{slug}/ensure-orchestrator-room", response_model=CityRoomSummary)
async def ensure_orchestrator_room(
    slug: str,
    authorization: Optional[str] = Header(None)
):
    """
    Ensure the orchestrator team room exists for a MicroDAO.

    Creates a new room in Matrix and the DB if it does not exist yet
    (delegated to repo_city.get_or_create_orchestrator_team_room).
    Intended for: admins and the MicroDAO's orchestrator.

    Raises:
        HTTPException 401: missing/invalid JWT or token without user_id.
        HTTPException 404: unknown MicroDAO slug.
        HTTPException 500: room creation/retrieval failure.
    """
    # 1. Validate JWT — reject before touching the DB.
    user_info = await validate_jwt_token(authorization)
    if not user_info:
        raise HTTPException(status_code=401, detail="Invalid or missing authorization token")
    user_id = user_info.get("user_id")
    if not user_id:
        raise HTTPException(status_code=401, detail="Invalid token: missing user_id")

    # 2. Get MicroDAO by slug.
    dao = await repo_city.get_microdao_by_slug(slug)
    if not dao:
        raise HTTPException(status_code=404, detail="MicroDAO not found")

    microdao_id = dao["id"]
    # NOTE(review): orchestrator_id is currently unused — presumably kept for
    # the future permission check below; confirm before removing.
    orchestrator_id = dao.get("orchestrator_agent_id")

    # 3. Check permissions (mock logic for now, assuming valid user if token is present)
    # TODO: In real app, check if user_id matches owner or has admin role
    # For now, we assume if they can call this (protected by UI/proxy), they are authorized.

    try:
        # Idempotent: returns the existing team room or creates one.
        room = await repo_city.get_or_create_orchestrator_team_room(microdao_id)
        if not room:
            raise HTTPException(status_code=500, detail="Failed to create or retrieve orchestrator room")

        # Map the repo row onto the public response model.  Team rooms default
        # to private (is_public=False) and sort late (sort_order=100).
        return CityRoomSummary(
            id=room["id"],
            slug=room["slug"],
            name=room["name"],
            matrix_room_id=room.get("matrix_room_id"),
            microdao_id=room.get("microdao_id"),
            microdao_slug=slug,  # Ensure slug is passed
            room_role=room.get("room_role"),
            is_public=room.get("is_public", False),
            sort_order=room.get("sort_order", 100),
            logo_url=room.get("logo_url"),
            banner_url=room.get("banner_url")
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error ensuring orchestrator room for {slug}: {e}")
        raise HTTPException(status_code=500, detail="Internal server error")

View File

@@ -9,6 +9,7 @@ RUN apt-get update && apt-get install -y \
libtesseract-dev \
libgl1 \
libglib2.0-0 \
wget \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
@@ -25,7 +26,7 @@ COPY app/ ./app/
EXPOSE 8896
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD curl -f http://localhost:8896/health || exit 1
HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \
CMD wget -qO- http://localhost:8896/health || exit 1
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8896"]

View File

@@ -20,8 +20,9 @@ services:
count: 1
capabilities: [gpu]
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8896/health"]
test: ["CMD-SHELL", "wget -qO- http://localhost:8896/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s

View File

@@ -7,6 +7,7 @@ RUN apt-get update && apt-get install -y \
gcc \
g++ \
postgresql-client \
wget \
&& rm -rf /var/lib/apt/lists/*
# Copy requirements
@@ -21,6 +22,10 @@ COPY app/ ./app/
# Expose port
EXPOSE 9500
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD wget -qO- http://localhost:9500/health || exit 1
# Run application
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "9500"]

View File

@@ -6,10 +6,9 @@ Converts ParsedDocument to Haystack Documents and indexes them
import logging
from typing import List, Dict, Any, Optional
from haystack import Pipeline
from haystack import Pipeline, Document
from haystack.components.preprocessors import DocumentSplitter
from haystack.components.writers import DocumentWriter
from haystack.schema import Document
from app.document_store import get_document_store
from app.embedding import get_text_embedder

View File

@@ -4,6 +4,7 @@ FROM python:3.11-slim
RUN apt-get update && apt-get install -y \
ffmpeg \
git \
wget \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app

View File

@@ -13,8 +13,9 @@ services:
- ./app:/app/app
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8895/health"]
test: ["CMD-SHELL", "wget -qO- http://localhost:8895/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 30s

View File

@@ -1,5 +1,9 @@
FROM python:3.11-slim
# Встановити wget для healthcheck
RUN apt-get update && apt-get install -y --no-install-recommends wget \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
COPY requirements.txt .
@@ -9,5 +13,9 @@ COPY app/ ./app/
EXPOSE 8890
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=10s --retries=3 \
CMD wget -qO- http://localhost:8890/health || exit 1
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8890"]

View File

@@ -1,5 +1,9 @@
FROM python:3.11-slim
# Встановити wget для healthcheck
RUN apt-get update && apt-get install -y --no-install-recommends wget \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Копіювати requirements
@@ -19,5 +23,9 @@ RUN mkdir -p /app/chroma_data
EXPOSE 8898
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
CMD wget -qO- http://localhost:8898/health || exit 1
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8898"]

View File

@@ -1,7 +1,7 @@
fastapi==0.104.1
uvicorn[standard]==0.24.0
chromadb==0.4.18
sentence-transformers==2.2.2
numpy==1.24.3
torch==2.1.0
sentence-transformers==2.6.1
numpy==1.26.4
torch==2.4.0

View File

@@ -1,5 +1,9 @@
FROM python:3.11-slim
# Встановити wget для healthcheck
RUN apt-get update && apt-get install -y --no-install-recommends wget \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /app
# Копіювати requirements

View File

@@ -13,8 +13,9 @@ services:
- ./app:/app/app
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:8897/health"]
test: ["CMD-SHELL", "wget -qO- http://localhost:8897/health || exit 1"]
interval: 30s
timeout: 10s
retries: 3
start_period: 10s