🧠 Add Agent Memory System with PostgreSQL + Qdrant + Cohere

Features:
- Three-tier memory architecture (short/mid/long-term)
- PostgreSQL schema for conversations, events, memories
- Qdrant vector database for semantic search
- Cohere embeddings (embed-multilingual-v3.0, 1024 dims)
- FastAPI Memory Service with full CRUD
- External Secrets integration with Vault
- Kubernetes deployment manifests

Components:
- infrastructure/database/agent-memory-schema.sql
- infrastructure/kubernetes/apps/qdrant/
- infrastructure/kubernetes/apps/memory-service/
- services/memory-service/ (FastAPI app)

Also includes:
- External Secrets Operator
- Traefik Ingress Controller
- Cert-Manager with Let's Encrypt
- ArgoCD for GitOps
Author: Apple
Date: 2026-01-10 07:52:32 -08:00
Commit: 90758facae (parent: 12545a7c76)
16 changed files with 2769 additions and 579 deletions
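
The three tiers meet in a single write path: a new fact is embedded with Cohere, the vector lands in Qdrant, and the canonical row lands in PostgreSQL with a pointer back to the Qdrant point. A minimal sketch of that flow, assuming client credentials, DSNs, and an "agent_memory" collection name (the actual Memory Service code is not reproduced on this page):

import uuid

import cohere
import psycopg2
from qdrant_client import QdrantClient
from qdrant_client.models import PointStruct

co = cohere.Client("COHERE_API_KEY")  # assumed: sourced from Vault via External Secrets
qdrant = QdrantClient(url="http://qdrant.qdrant.svc.cluster.local:6333")
pg = psycopg2.connect("postgresql://daarion:secret@127.0.0.1:6432/daarion")  # pooled via PgBouncer

fact = "Ivan prefers answers in Ukrainian."
vector = co.embed(
    texts=[fact],
    model="embed-multilingual-v3.0",
    input_type="search_document",  # documents at write time; "search_query" at read time
).embeddings[0]  # 1024 dimensions

point_id = str(uuid.uuid4())
qdrant.upsert(
    collection_name="agent_memory",  # hypothetical collection name
    points=[PointStruct(id=point_id, vector=vector, payload={"fact": fact})],
)

with pg, pg.cursor() as cur:
    cur.execute(
        """INSERT INTO long_term_memory_items
               (user_id, category, fact_text, fact_embedding_id, extraction_method)
           VALUES (%s, 'preference', %s, %s, 'explicit')""",
        ("c0000000-0000-0000-0000-000000000001", fact, point_id),
    )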

@@ -0,0 +1,257 @@
# DAARION Network - PostgreSQL HA Setup
# Patroni + PgBouncer + pgBackRest + Monitoring
---
# =============================================================================
# POSTGRESQL HA WITH PATRONI
# =============================================================================
- name: Setup PostgreSQL HA Cluster
hosts: database_nodes
become: yes
vars:
# Patroni
patroni_version: "3.2.0"
patroni_scope: "daarion-cluster"
patroni_namespace: "/daarion"
# PostgreSQL
postgres_version: "16"
postgres_data_dir: "/var/lib/postgresql/{{ postgres_version }}/main"
postgres_config_dir: "/etc/postgresql/{{ postgres_version }}/main"
# PgBouncer
pgbouncer_port: 6432
pgbouncer_max_client_conn: 1000
pgbouncer_default_pool_size: 50
# Backup
pgbackrest_repo_path: "/var/lib/pgbackrest"
pgbackrest_s3_bucket: "daarion-backups"
# Consul for DCS
consul_host: "{{ hostvars[groups['masters'][0]].ansible_host }}"
tasks:
# =========================================================================
# PREREQUISITES
# =========================================================================
- name: Add PostgreSQL APT repository
shell: |
curl -fsSL https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor -o /usr/share/keyrings/postgresql-keyring.gpg
echo "deb [signed-by=/usr/share/keyrings/postgresql-keyring.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list
args:
creates: /etc/apt/sources.list.d/pgdg.list
- name: Update apt cache
apt:
update_cache: yes
- name: Install PostgreSQL and dependencies
apt:
name:
- postgresql-{{ postgres_version }}
- postgresql-contrib-{{ postgres_version }}
- python3-pip
- python3-psycopg2
- python3-consul
- pgbouncer
- pgbackrest
state: present
# =========================================================================
# PATRONI INSTALLATION
# =========================================================================
- name: Install Patroni
pip:
name:
- patroni[consul]=={{ patroni_version }}
- python-consul
state: present
- name: Create Patroni directories
file:
path: "{{ item }}"
state: directory
owner: postgres
group: postgres
mode: '0750'
loop:
- /etc/patroni
- /var/log/patroni
- name: Configure Patroni
template:
src: templates/patroni.yml.j2
dest: /etc/patroni/patroni.yml
owner: postgres
group: postgres
mode: '0640'
notify: restart patroni
- name: Create Patroni systemd service
copy:
dest: /etc/systemd/system/patroni.service
content: |
[Unit]
Description=Patroni PostgreSQL Cluster Manager
After=network.target consul.service
[Service]
Type=simple
User=postgres
Group=postgres
ExecStart=/usr/local/bin/patroni /etc/patroni/patroni.yml
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
TimeoutSec=30
Restart=on-failure
[Install]
WantedBy=multi-user.target
notify:
- reload systemd
- restart patroni
# =========================================================================
# PGBOUNCER
# =========================================================================
- name: Configure PgBouncer
template:
src: templates/pgbouncer.ini.j2
dest: /etc/pgbouncer/pgbouncer.ini
owner: postgres
group: postgres
mode: '0640'
notify: restart pgbouncer
- name: Configure PgBouncer userlist
copy:
dest: /etc/pgbouncer/userlist.txt
content: |
"{{ postgres_user }}" "{{ postgres_password }}"
"pgbouncer" "{{ pgbouncer_password | default('pgbouncer_secret') }}"
owner: postgres
group: postgres
mode: '0600'
notify: restart pgbouncer
- name: Enable PgBouncer
service:
name: pgbouncer
enabled: yes
state: started
# =========================================================================
# PGBACKREST
# =========================================================================
- name: Create pgBackRest directories
file:
path: "{{ item }}"
state: directory
owner: postgres
group: postgres
mode: '0750'
loop:
- "{{ pgbackrest_repo_path }}"
- /var/log/pgbackrest
- name: Configure pgBackRest
template:
src: templates/pgbackrest.conf.j2
dest: /etc/pgbackrest.conf
owner: postgres
group: postgres
mode: '0640'
- name: Setup backup cron
cron:
name: "Daily PostgreSQL backup"
hour: "2"
minute: "0"
user: postgres
job: "pgbackrest --stanza={{ patroni_scope }} --type=diff backup >> /var/log/pgbackrest/backup.log 2>&1"
- name: Setup weekly full backup
cron:
name: "Weekly full PostgreSQL backup"
weekday: "0"
hour: "3"
minute: "0"
user: postgres
job: "pgbackrest --stanza={{ patroni_scope }} --type=full backup >> /var/log/pgbackrest/backup.log 2>&1"
# =========================================================================
# MONITORING
# =========================================================================
- name: Install postgres_exporter
shell: |
curl -sL https://github.com/prometheus-community/postgres_exporter/releases/download/v0.15.0/postgres_exporter-0.15.0.linux-amd64.tar.gz | tar xz
mv postgres_exporter-0.15.0.linux-amd64/postgres_exporter /usr/local/bin/
rm -rf postgres_exporter-0.15.0.linux-amd64
args:
creates: /usr/local/bin/postgres_exporter
- name: Create postgres_exporter systemd service
copy:
dest: /etc/systemd/system/postgres_exporter.service
content: |
[Unit]
Description=Prometheus PostgreSQL Exporter
After=network.target postgresql.service
[Service]
Type=simple
User=postgres
Environment="DATA_SOURCE_NAME=postgresql://{{ postgres_user }}:{{ postgres_password }}@localhost:5432/{{ postgres_db }}?sslmode=disable"
ExecStart=/usr/local/bin/postgres_exporter --web.listen-address=:9187
Restart=on-failure
[Install]
WantedBy=multi-user.target
notify:
- reload systemd
- restart postgres_exporter
- name: Enable postgres_exporter
service:
name: postgres_exporter
enabled: yes
state: started
# =========================================================================
# VERIFICATION
# =========================================================================
- name: Show PostgreSQL HA status
debug:
msg: |
PostgreSQL HA Setup Complete!
Components:
- Patroni: Cluster management
- PgBouncer: Connection pooling (port {{ pgbouncer_port }})
- pgBackRest: Backups
- postgres_exporter: Metrics (port 9187)
Connection strings:
- Direct: postgresql://{{ postgres_user }}@{{ ansible_host }}:5432/{{ postgres_db }}
- Pooled: postgresql://{{ postgres_user }}@{{ ansible_host }}:{{ pgbouncer_port }}/{{ postgres_db }}
handlers:
- name: reload systemd
systemd:
daemon_reload: yes
- name: restart patroni
service:
name: patroni
state: restarted
- name: restart pgbouncer
service:
name: pgbouncer
state: restarted
- name: restart postgres_exporter
service:
name: postgres_exporter
state: restarted
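
Once the play has run, cluster health can be checked without logging into psql: Patroni's REST API on port 8008 reports topology, and PgBouncer answers on 6432. A minimal sketch, assuming node addresses and the daarion database/user from inventory:

import psycopg2
import requests

# Any Patroni member can report the whole cluster (leader, replicas, lag).
topology = requests.get("http://10.0.0.11:8008/cluster", timeout=5).json()
for member in topology["members"]:
    print(member["name"], member["role"], member.get("lag", 0))

# Go through PgBouncer's transaction pool (6432), not straight to 5432.
conn = psycopg2.connect(host="10.0.0.11", port=6432, dbname="daarion",
                        user="daarion", password="...")  # assumed credentials
with conn.cursor() as cur:
    cur.execute("SELECT pg_is_in_recovery()")
    print("replica" if cur.fetchone()[0] else "primary")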

@@ -0,0 +1,120 @@
# Patroni Configuration for {{ inventory_hostname }}
# Generated by Ansible
scope: {{ patroni_scope }}
namespace: {{ patroni_namespace }}
name: {{ inventory_hostname }}
restapi:
listen: 0.0.0.0:8008
connect_address: {{ ansible_host }}:8008
consul:
host: {{ consul_host }}:8500
register_service: true
bootstrap:
dcs:
ttl: 30
loop_wait: 10
retry_timeout: 10
maximum_lag_on_failover: 1048576
postgresql:
use_pg_rewind: true
use_slots: true
parameters:
# Performance
max_connections: 200
shared_buffers: 256MB
effective_cache_size: 768MB
maintenance_work_mem: 64MB
checkpoint_completion_target: 0.9
wal_buffers: 16MB
default_statistics_target: 100
random_page_cost: 1.1
effective_io_concurrency: 200
work_mem: 2621kB
huge_pages: off
min_wal_size: 1GB
max_wal_size: 4GB
max_worker_processes: 4
max_parallel_workers_per_gather: 2
max_parallel_workers: 4
max_parallel_maintenance_workers: 2
# Replication
wal_level: replica
hot_standby: "on"
max_wal_senders: 10
max_replication_slots: 10
hot_standby_feedback: "on"
# Logging
log_destination: 'stderr'
logging_collector: 'on'
log_directory: 'log'
log_filename: 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age: '1d'
log_rotation_size: '100MB'
log_min_duration_statement: 1000
log_checkpoints: 'on'
log_connections: 'on'
log_disconnections: 'on'
log_lock_waits: 'on'
# Archive (for pgBackRest)
archive_mode: "on"
archive_command: 'pgbackrest --stanza={{ patroni_scope }} archive-push %p'
initdb:
- encoding: UTF8
- data-checksums
pg_hba:
- host replication replicator 0.0.0.0/0 scram-sha-256
- host all all 0.0.0.0/0 scram-sha-256
users:
{{ postgres_user }}:
password: {{ postgres_password }}
options:
- createrole
- createdb
replicator:
password: {{ replicator_password | default('replicator_secret') }}
options:
- replication
postgresql:
listen: 0.0.0.0:5432
connect_address: {{ ansible_host }}:5432
data_dir: {{ postgres_data_dir }}
bin_dir: /usr/lib/postgresql/{{ postgres_version }}/bin
config_dir: {{ postgres_config_dir }}
pgpass: /var/lib/postgresql/.pgpass
authentication:
replication:
username: replicator
password: {{ replicator_password | default('replicator_secret') }}
superuser:
username: postgres
password: {{ postgres_superuser_password | default('postgres_secret') }}
rewind:
username: rewind
password: {{ rewind_password | default('rewind_secret') }}
parameters:
unix_socket_directories: '/var/run/postgresql'
pg_hba:
- local all all peer
- host all all 127.0.0.1/32 scram-sha-256
- host all all 0.0.0.0/0 scram-sha-256
- host replication replicator 0.0.0.0/0 scram-sha-256
tags:
nofailover: false
noloadbalance: false
clonefrom: false
nosync: false
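
The parameters this template bootstraps (wal_level=replica, use_slots, hot_standby) are easy to verify once the cluster is up; a short sketch against the current leader, with the host and the default superuser credentials above assumed:

import psycopg2

conn = psycopg2.connect(host="10.0.0.11", port=5432, dbname="postgres",
                        user="postgres", password="postgres_secret")  # defaults above
with conn.cursor() as cur:
    cur.execute("SHOW wal_level")  # expect 'replica'
    print("wal_level:", cur.fetchone()[0])
    cur.execute("SELECT slot_name, active FROM pg_replication_slots")
    print("slots:", cur.fetchall())  # populated because use_slots: true
    cur.execute("SELECT client_addr, state, sync_state FROM pg_stat_replication")
    print("replicas:", cur.fetchall())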

@@ -0,0 +1,40 @@
# pgBackRest Configuration for {{ inventory_hostname }}
# Generated by Ansible
[global]
# Repository
repo1-path={{ pgbackrest_repo_path }}
repo1-retention-full=2
repo1-retention-diff=7
# S3 (optional - uncomment for cloud backups)
# repo2-type=s3
# repo2-path=/backup
# repo2-s3-bucket={{ pgbackrest_s3_bucket }}
# repo2-s3-endpoint=s3.eu-central-1.amazonaws.com
# repo2-s3-region=eu-central-1
# repo2-s3-key={{ pgbackrest_s3_key | default('') }}
# repo2-s3-key-secret={{ pgbackrest_s3_secret | default('') }}
# repo2-retention-full=4
# repo2-retention-diff=14
# Compression
compress-type=zst
compress-level=3
# Parallel
process-max=4
# Logging
log-level-console=info
log-level-file=detail
log-path=/var/log/pgbackrest
# Archive
archive-async=y
archive-push-queue-max=4GB
[{{ patroni_scope }}]
pg1-path={{ postgres_data_dir }}
pg1-port=5432
pg1-user=postgres
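
The retention settings above (2 full, 7 diff) can be audited from any node with `pgbackrest info`, which has a documented JSON output mode; a sketch that assumes the daarion-cluster stanza and the local repository:

import json
import subprocess

out = subprocess.run(
    ["pgbackrest", "info", "--stanza=daarion-cluster", "--output=json"],
    check=True, capture_output=True, text=True,
).stdout
for stanza in json.loads(out):
    for backup in stanza.get("backup", []):
        # 'full' backups come from the weekly cron, 'diff' from the daily one
        print(backup["type"], backup["label"], backup["info"]["size"])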

@@ -0,0 +1,44 @@
# PgBouncer Configuration for {{ inventory_hostname }}
# Generated by Ansible
[databases]
{{ postgres_db }} = host=127.0.0.1 port=5432 dbname={{ postgres_db }}
* = host=127.0.0.1 port=5432
[pgbouncer]
listen_addr = 0.0.0.0
listen_port = {{ pgbouncer_port }}
unix_socket_dir = /var/run/postgresql
auth_type = scram-sha-256
auth_file = /etc/pgbouncer/userlist.txt
# Pool settings
pool_mode = transaction
max_client_conn = {{ pgbouncer_max_client_conn }}
default_pool_size = {{ pgbouncer_default_pool_size }}
min_pool_size = 10
reserve_pool_size = 5
reserve_pool_timeout = 3
# Timeouts
server_connect_timeout = 15
server_idle_timeout = 600
server_lifetime = 3600
client_idle_timeout = 0
client_login_timeout = 60
query_timeout = 0
query_wait_timeout = 120
# Logging
log_connections = 1
log_disconnections = 1
log_pooler_errors = 1
stats_period = 60
# Admin
admin_users = pgbouncer,{{ postgres_user }}
stats_users = pgbouncer,{{ postgres_user }}
# Security
ignore_startup_parameters = extra_float_digits
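
Because the users above are listed as admin_users, pool behaviour can be inspected live through PgBouncer's virtual "pgbouncer" database; SHOW commands need autocommit. A sketch with an assumed host and the default admin password from the playbook:

import psycopg2

# The admin console is a virtual database named "pgbouncer".
conn = psycopg2.connect(host="10.0.0.11", port=6432, dbname="pgbouncer",
                        user="pgbouncer", password="pgbouncer_secret")
conn.autocommit = True  # the console does not speak transactions
with conn.cursor() as cur:
    cur.execute("SHOW POOLS")   # per-db/user pools; compare with default_pool_size=50
    for row in cur.fetchall():
        print(row)
    cur.execute("SHOW STATS")   # query/byte counters per stats_period (60s)
    print(cur.fetchall())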

@@ -0,0 +1,458 @@
-- ============================================================================
-- DAARION Agent Memory System - PostgreSQL Schema
-- Version: 1.0.0
-- Date: 2026-01-10
--
-- Three-tier agent memory:
-- 1. Short-term: conversation_events (working buffer)
-- 2. Mid-term: thread_summaries (per-session/per-topic)
-- 3. Long-term: long_term_memory_items (personal/project facts)
-- ============================================================================
-- Extensions
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
CREATE EXTENSION IF NOT EXISTS "pgcrypto";
-- ============================================================================
-- CORE ENTITIES
-- ============================================================================
-- Organizations (top-level tenant)
CREATE TABLE IF NOT EXISTS organizations (
org_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
name VARCHAR(255) NOT NULL,
settings JSONB DEFAULT '{}',
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW()
);
-- Workspaces (projects within org)
CREATE TABLE IF NOT EXISTS workspaces (
workspace_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
org_id UUID NOT NULL REFERENCES organizations(org_id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
description TEXT,
settings JSONB DEFAULT '{}',
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX idx_workspaces_org ON workspaces(org_id);
-- Users
CREATE TABLE IF NOT EXISTS users (
user_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
org_id UUID NOT NULL REFERENCES organizations(org_id) ON DELETE CASCADE,
external_id VARCHAR(255), -- for SSO/OAuth mapping
email VARCHAR(255),
display_name VARCHAR(255),
preferences JSONB DEFAULT '{}',
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW(),
UNIQUE(org_id, external_id)
);
CREATE INDEX idx_users_org ON users(org_id);
CREATE INDEX idx_users_email ON users(email);
-- Agents
CREATE TABLE IF NOT EXISTS agents (
agent_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
org_id UUID NOT NULL REFERENCES organizations(org_id) ON DELETE CASCADE,
name VARCHAR(255) NOT NULL,
type VARCHAR(50) NOT NULL, -- 'assistant', 'specialist', 'coordinator'
model VARCHAR(100), -- 'claude-3-opus', 'gpt-4', etc.
system_prompt TEXT,
capabilities JSONB DEFAULT '[]', -- ['code', 'search', 'memory', 'tools']
settings JSONB DEFAULT '{}',
is_active BOOLEAN DEFAULT true,
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX idx_agents_org ON agents(org_id);
CREATE INDEX idx_agents_type ON agents(type);
-- ============================================================================
-- CONVERSATION LAYER (Short-term Memory)
-- ============================================================================
-- Conversation threads
CREATE TABLE IF NOT EXISTS conversation_threads (
thread_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
org_id UUID NOT NULL REFERENCES organizations(org_id) ON DELETE CASCADE,
workspace_id UUID REFERENCES workspaces(workspace_id) ON DELETE SET NULL,
user_id UUID NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
agent_id UUID REFERENCES agents(agent_id) ON DELETE SET NULL,
title VARCHAR(500),
status VARCHAR(50) DEFAULT 'active', -- 'active', 'archived', 'completed'
-- Metadata
tags JSONB DEFAULT '[]',
metadata JSONB DEFAULT '{}',
-- Stats
message_count INTEGER DEFAULT 0,
total_tokens INTEGER DEFAULT 0,
-- Timestamps
created_at TIMESTAMPTZ DEFAULT NOW(),
last_activity_at TIMESTAMPTZ DEFAULT NOW(),
archived_at TIMESTAMPTZ
);
CREATE INDEX idx_threads_org ON conversation_threads(org_id);
CREATE INDEX idx_threads_workspace ON conversation_threads(workspace_id);
CREATE INDEX idx_threads_user ON conversation_threads(user_id);
CREATE INDEX idx_threads_agent ON conversation_threads(agent_id);
CREATE INDEX idx_threads_status ON conversation_threads(status);
CREATE INDEX idx_threads_last_activity ON conversation_threads(last_activity_at DESC);
-- Conversation events (Event Log - source of truth)
CREATE TABLE IF NOT EXISTS conversation_events (
event_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
thread_id UUID NOT NULL REFERENCES conversation_threads(thread_id) ON DELETE CASCADE,
-- Event type
event_type VARCHAR(50) NOT NULL, -- 'message', 'tool_call', 'tool_result', 'decision', 'summary', 'memory_write', 'memory_retract', 'error'
-- For messages
role VARCHAR(20), -- 'user', 'assistant', 'system', 'tool'
content TEXT,
-- For tool calls
tool_name VARCHAR(100),
tool_input JSONB,
tool_output JSONB,
-- Structured payload (flexible)
payload JSONB DEFAULT '{}',
-- Token tracking
token_count INTEGER,
-- Metadata
model_used VARCHAR(100),
latency_ms INTEGER,
metadata JSONB DEFAULT '{}',
-- Ordering
sequence_num SERIAL,
created_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX idx_events_thread ON conversation_events(thread_id);
CREATE INDEX idx_events_type ON conversation_events(event_type);
CREATE INDEX idx_events_created ON conversation_events(created_at DESC);
CREATE INDEX idx_events_thread_seq ON conversation_events(thread_id, sequence_num);
-- ============================================================================
-- SUMMARY LAYER (Mid-term Memory)
-- ============================================================================
-- Thread summaries (rolling compression)
CREATE TABLE IF NOT EXISTS thread_summaries (
summary_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
thread_id UUID NOT NULL REFERENCES conversation_threads(thread_id) ON DELETE CASCADE,
-- Version tracking
version INTEGER NOT NULL DEFAULT 1,
-- Summary content
summary_text TEXT NOT NULL,
-- Structured state
state JSONB DEFAULT '{}', -- {goals: [], decisions: [], open_questions: [], next_steps: [], key_facts: []}
-- Coverage
events_from_seq INTEGER, -- first event sequence included
events_to_seq INTEGER, -- last event sequence included
events_count INTEGER,
-- Token info
original_tokens INTEGER, -- tokens before compression
summary_tokens INTEGER, -- tokens after compression
compression_ratio FLOAT,
-- Timestamps
created_at TIMESTAMPTZ DEFAULT NOW(),
UNIQUE(thread_id, version)
);
CREATE INDEX idx_summaries_thread ON thread_summaries(thread_id);
CREATE INDEX idx_summaries_thread_version ON thread_summaries(thread_id, version DESC);
-- ============================================================================
-- LONG-TERM MEMORY LAYER
-- ============================================================================
-- Memory categories enum
CREATE TYPE memory_category AS ENUM (
'preference', -- user likes/dislikes
'identity', -- who the user is
'constraint', -- limitations, rules
'project_fact', -- project-specific knowledge
'relationship', -- connections between entities
'skill', -- user capabilities
'goal', -- user objectives
'context', -- situational info
'feedback' -- user corrections/confirmations
);
-- Retention policy enum
CREATE TYPE retention_policy AS ENUM (
'permanent', -- keep until explicitly deleted
'session', -- delete after session
'ttl_days', -- delete after N days
'until_revoked' -- keep until user revokes
);
-- Long-term memory items
CREATE TABLE IF NOT EXISTS long_term_memory_items (
memory_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
-- Scope (all nullable for flexibility)
org_id UUID REFERENCES organizations(org_id) ON DELETE CASCADE,
workspace_id UUID REFERENCES workspaces(workspace_id) ON DELETE SET NULL,
user_id UUID REFERENCES users(user_id) ON DELETE CASCADE,
agent_id UUID REFERENCES agents(agent_id) ON DELETE SET NULL, -- null = global for user
-- Content
category memory_category NOT NULL,
fact_text TEXT NOT NULL, -- atomic statement
fact_embedding_id VARCHAR(100), -- reference to Qdrant point ID
-- Confidence & validation
confidence FLOAT DEFAULT 0.8 CHECK (confidence >= 0 AND confidence <= 1),
is_verified BOOLEAN DEFAULT false,
verification_count INTEGER DEFAULT 0,
-- Source tracking
source_event_id UUID REFERENCES conversation_events(event_id) ON DELETE SET NULL,
source_thread_id UUID REFERENCES conversation_threads(thread_id) ON DELETE SET NULL,
extraction_method VARCHAR(50), -- 'explicit', 'inferred', 'confirmed', 'imported'
-- Lifecycle
valid_from TIMESTAMPTZ DEFAULT NOW(),
valid_to TIMESTAMPTZ, -- null = currently valid
last_confirmed_at TIMESTAMPTZ,
last_used_at TIMESTAMPTZ,
use_count INTEGER DEFAULT 0,
-- Privacy & retention
is_sensitive BOOLEAN DEFAULT false,
retention retention_policy DEFAULT 'until_revoked',
ttl_days INTEGER, -- if retention = 'ttl_days'
-- Metadata
tags JSONB DEFAULT '[]',
metadata JSONB DEFAULT '{}',
-- Timestamps
created_at TIMESTAMPTZ DEFAULT NOW(),
updated_at TIMESTAMPTZ DEFAULT NOW()
);
-- Indexes for memory retrieval
CREATE INDEX idx_memory_org ON long_term_memory_items(org_id);
CREATE INDEX idx_memory_workspace ON long_term_memory_items(workspace_id);
CREATE INDEX idx_memory_user ON long_term_memory_items(user_id);
CREATE INDEX idx_memory_agent ON long_term_memory_items(agent_id);
CREATE INDEX idx_memory_category ON long_term_memory_items(category);
CREATE INDEX idx_memory_user_agent ON long_term_memory_items(user_id, agent_id);
CREATE INDEX idx_memory_valid ON long_term_memory_items(valid_from, valid_to);
CREATE INDEX idx_memory_confidence ON long_term_memory_items(confidence DESC);
CREATE INDEX idx_memory_created ON long_term_memory_items(created_at DESC);
-- GIN index for tags search
CREATE INDEX idx_memory_tags ON long_term_memory_items USING GIN (tags);
-- Memory feedback (user corrections)
CREATE TABLE IF NOT EXISTS memory_feedback (
feedback_id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
memory_id UUID NOT NULL REFERENCES long_term_memory_items(memory_id) ON DELETE CASCADE,
user_id UUID NOT NULL REFERENCES users(user_id) ON DELETE CASCADE,
action VARCHAR(20) NOT NULL, -- 'confirm', 'reject', 'edit', 'delete'
old_value TEXT,
new_value TEXT,
reason TEXT,
created_at TIMESTAMPTZ DEFAULT NOW()
);
CREATE INDEX idx_feedback_memory ON memory_feedback(memory_id);
CREATE INDEX idx_feedback_user ON memory_feedback(user_id);
-- ============================================================================
-- HELPER VIEWS
-- ============================================================================
-- Active memories for a user (across all agents)
CREATE OR REPLACE VIEW v_active_user_memories AS
SELECT
m.*,
a.name as agent_name
FROM long_term_memory_items m
LEFT JOIN agents a ON m.agent_id = a.agent_id
WHERE m.valid_to IS NULL
AND m.confidence >= 0.5
ORDER BY m.confidence DESC, m.last_used_at DESC NULLS LAST;
-- Recent conversations with summaries
CREATE OR REPLACE VIEW v_recent_conversations AS
SELECT
t.thread_id,
t.title,
t.user_id,
t.agent_id,
t.message_count,
t.last_activity_at,
s.summary_text,
s.state
FROM conversation_threads t
LEFT JOIN LATERAL (
SELECT summary_text, state
FROM thread_summaries
WHERE thread_id = t.thread_id
ORDER BY version DESC
LIMIT 1
) s ON true
WHERE t.status = 'active'
ORDER BY t.last_activity_at DESC;
-- ============================================================================
-- FUNCTIONS
-- ============================================================================
-- Update thread stats after new event
CREATE OR REPLACE FUNCTION update_thread_stats()
RETURNS TRIGGER AS $$
BEGIN
UPDATE conversation_threads
SET
message_count = message_count + CASE WHEN NEW.event_type = 'message' THEN 1 ELSE 0 END,
total_tokens = total_tokens + COALESCE(NEW.token_count, 0),
last_activity_at = NOW()
WHERE thread_id = NEW.thread_id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_update_thread_stats
AFTER INSERT ON conversation_events
FOR EACH ROW EXECUTE FUNCTION update_thread_stats();
-- Update memory usage stats (defined here without an attached trigger; the
-- service can invoke it, or attach a trigger, when memories are retrieved)
CREATE OR REPLACE FUNCTION update_memory_usage()
RETURNS TRIGGER AS $$
BEGIN
UPDATE long_term_memory_items
SET
use_count = use_count + 1,
last_used_at = NOW()
WHERE memory_id = NEW.memory_id;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Auto-update updated_at
CREATE OR REPLACE FUNCTION update_updated_at()
RETURNS TRIGGER AS $$
BEGIN
NEW.updated_at = NOW();
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER trg_organizations_updated
BEFORE UPDATE ON organizations
FOR EACH ROW EXECUTE FUNCTION update_updated_at();
CREATE TRIGGER trg_workspaces_updated
BEFORE UPDATE ON workspaces
FOR EACH ROW EXECUTE FUNCTION update_updated_at();
CREATE TRIGGER trg_users_updated
BEFORE UPDATE ON users
FOR EACH ROW EXECUTE FUNCTION update_updated_at();
CREATE TRIGGER trg_agents_updated
BEFORE UPDATE ON agents
FOR EACH ROW EXECUTE FUNCTION update_updated_at();
CREATE TRIGGER trg_memory_updated
BEFORE UPDATE ON long_term_memory_items
FOR EACH ROW EXECUTE FUNCTION update_updated_at();
-- ============================================================================
-- INITIAL DATA
-- ============================================================================
-- Default organization
INSERT INTO organizations (org_id, name, settings)
VALUES (
'a0000000-0000-0000-0000-000000000001',
'DAARION',
'{"tier": "enterprise", "features": ["memory", "multi-agent", "knowledge-base"]}'
) ON CONFLICT DO NOTHING;
-- Default workspace
INSERT INTO workspaces (workspace_id, org_id, name, description)
VALUES (
'b0000000-0000-0000-0000-000000000001',
'a0000000-0000-0000-0000-000000000001',
'MicroDAO',
'Main development workspace for DAARION project'
) ON CONFLICT DO NOTHING;
-- Default user (Ivan)
INSERT INTO users (user_id, org_id, external_id, display_name, preferences)
VALUES (
'c0000000-0000-0000-0000-000000000001',
'a0000000-0000-0000-0000-000000000001',
'ivan',
'Ivan Tytar',
'{"language": "uk", "timezone": "Europe/Kyiv"}'
) ON CONFLICT DO NOTHING;
-- Default agents
INSERT INTO agents (agent_id, org_id, name, type, model, capabilities) VALUES
(
'd0000000-0000-0000-0000-000000000001',
'a0000000-0000-0000-0000-000000000001',
'Claude Assistant',
'assistant',
'claude-3-opus',
'["code", "memory", "tools", "search"]'
),
(
'd0000000-0000-0000-0000-000000000002',
'a0000000-0000-0000-0000-000000000001',
'Code Specialist',
'specialist',
'claude-3-opus',
'["code", "review", "refactor"]'
),
(
'd0000000-0000-0000-0000-000000000003',
'a0000000-0000-0000-0000-000000000001',
'DevOps Agent',
'specialist',
'claude-3-opus',
'["infrastructure", "deployment", "monitoring"]'
)
ON CONFLICT DO NOTHING;
-- ============================================================================
-- COMMENTS
-- ============================================================================
COMMENT ON TABLE conversation_events IS 'Event log for all conversation activities - source of truth';
COMMENT ON TABLE thread_summaries IS 'Rolling summaries for context compression (mid-term memory)';
COMMENT ON TABLE long_term_memory_items IS 'Persistent facts about users/projects (long-term memory)';
COMMENT ON COLUMN long_term_memory_items.fact_text IS 'Atomic statement - one fact per row';
COMMENT ON COLUMN long_term_memory_items.confidence IS 'Confidence score 0-1, increases with confirmations';
COMMENT ON COLUMN long_term_memory_items.fact_embedding_id IS 'Reference to Qdrant vector point ID';
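
The fact_embedding_id column is the bridge back to Qdrant: retrieval embeds the query, searches vectors, then hydrates the matching rows here. A sketch of that read path, using the same assumed collection name and connection strings as the write-path example near the top of this page:

import cohere
import psycopg2
from qdrant_client import QdrantClient

co = cohere.Client("COHERE_API_KEY")
qdrant = QdrantClient(url="http://qdrant.qdrant.svc.cluster.local:6333")
pg = psycopg2.connect("postgresql://daarion:secret@127.0.0.1:6432/daarion")

query = "Which language should answers use?"
vector = co.embed(texts=[query], model="embed-multilingual-v3.0",
                  input_type="search_query").embeddings[0]
hits = qdrant.search(collection_name="agent_memory", query_vector=vector, limit=5)

ids = [str(hit.id) for hit in hits]
with pg.cursor() as cur:
    cur.execute(
        """SELECT fact_text, category, confidence
             FROM long_term_memory_items
            WHERE fact_embedding_id = ANY(%s) AND valid_to IS NULL""",
        (ids,),
    )
    for fact_text, category, confidence in cur.fetchall():
        print(f"[{category} {confidence:.2f}] {fact_text}")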

@@ -0,0 +1,105 @@
---
# DAARION Memory Service
# Agent memory management with PostgreSQL + Qdrant + Cohere
apiVersion: apps/v1
kind: Deployment
metadata:
name: memory-service
namespace: daarion
labels:
app: memory-service
component: memory
spec:
replicas: 1
selector:
matchLabels:
app: memory-service
template:
metadata:
labels:
app: memory-service
component: memory
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: "true"
containers:
- name: memory-service
image: ghcr.io/ivantytar/memory-service:latest
imagePullPolicy: Always
ports:
- containerPort: 8000
name: http
envFrom:
- secretRef:
name: memory-service-secrets
env:
- name: MEMORY_DEBUG
value: "false"
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /health
port: 8000
initialDelaySeconds: 5
periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
name: memory-service
namespace: daarion
spec:
selector:
app: memory-service
ports:
- name: http
port: 8000
targetPort: 8000
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: memory-service-external
namespace: daarion
spec:
selector:
app: memory-service
ports:
- name: http
port: 8000
targetPort: 8000
nodePort: 30800
type: NodePort
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: memory-service-ingress
namespace: daarion
annotations:
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: memory.daarion.local
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: memory-service
port:
number: 8000
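
Only the /health route is confirmed by the probes above; the service's CRUD routes live in services/memory-service/ and are not reproduced on this page. A smoke-test sketch against both Services (node IP assumed):

import requests

# In-cluster, via the ClusterIP Service:
r = requests.get("http://memory-service.daarion.svc.cluster.local:8000/health", timeout=5)
r.raise_for_status()
print(r.json())

# From outside, via the NodePort Service on any node IP (port 30800):
print(requests.get("http://10.0.0.11:30800/health", timeout=5).status_code)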

@@ -0,0 +1,133 @@
---
# DAARION Qdrant Vector Database
# For semantic search in agent memory system
apiVersion: v1
kind: Namespace
metadata:
name: qdrant
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: qdrant-storage
namespace: qdrant
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 20Gi
storageClassName: local-path
---
apiVersion: v1
kind: ConfigMap
metadata:
name: qdrant-config
namespace: qdrant
data:
config.yaml: |
log_level: INFO
storage:
storage_path: /qdrant/storage
snapshots_path: /qdrant/snapshots
service:
http_port: 6333
grpc_port: 6334
cluster:
enabled: false
telemetry_disabled: true
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: qdrant
namespace: qdrant
labels:
app: qdrant
spec:
replicas: 1
selector:
matchLabels:
app: qdrant
template:
metadata:
labels:
app: qdrant
spec:
nodeSelector:
node-role.kubernetes.io/control-plane: "true"
containers:
- name: qdrant
image: qdrant/qdrant:v1.7.4
ports:
- containerPort: 6333
name: http
- containerPort: 6334
name: grpc
volumeMounts:
- name: storage
mountPath: /qdrant/storage
- name: config
mountPath: /qdrant/config
resources:
requests:
memory: "512Mi"
cpu: "250m"
limits:
memory: "2Gi"
cpu: "1"
livenessProbe:
httpGet:
path: /
port: 6333
initialDelaySeconds: 30
periodSeconds: 10
readinessProbe:
httpGet:
path: /readyz
port: 6333
initialDelaySeconds: 5
periodSeconds: 5
volumes:
- name: storage
persistentVolumeClaim:
claimName: qdrant-storage
- name: config
configMap:
name: qdrant-config
---
apiVersion: v1
kind: Service
metadata:
name: qdrant
namespace: qdrant
spec:
selector:
app: qdrant
ports:
- name: http
port: 6333
targetPort: 6333
- name: grpc
port: 6334
targetPort: 6334
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: qdrant-external
namespace: qdrant
spec:
selector:
app: qdrant
ports:
- name: http
port: 6333
targetPort: 6333
nodePort: 30633
type: NodePort
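
Before the first upsert, a collection has to exist and must match the Cohere model's geometry: 1024 dimensions, with cosine the usual distance for embed-multilingual-v3.0. A bootstrap sketch against the NodePort above; the collection name is an assumption:

from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams

client = QdrantClient(url="http://10.0.0.11:30633")  # or qdrant.qdrant.svc:6333 in-cluster
client.create_collection(
    collection_name="agent_memory",
    vectors_config=VectorParams(size=1024, distance=Distance.COSINE),
)
print(client.get_collection("agent_memory").status)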