- Vision Encoder Service (OpenCLIP ViT-L/14, GPU-accelerated)
  - FastAPI app with text/image embedding endpoints (768-dim)
  - Docker support with NVIDIA GPU runtime
  - Port 8001, health checks, model info API (see the client sketch after this list)
- Qdrant Vector Database integration
  - Ports 6333/6334 (HTTP/gRPC)
  - Image embedding storage (768-dim, cosine distance)
  - Automatic collection creation (see the collection sketch below)
- Vision RAG implementation
  - VisionEncoderClient (Python client for the API)
  - Image Search module (text-to-image, image-to-image; see the search sketch below)
  - Vision RAG routing in DAGI Router (mode: image_search)
  - VisionEncoderProvider integration
- Documentation (5000+ lines)
  - SYSTEM-INVENTORY.md - complete system inventory
  - VISION-ENCODER-STATUS.md - service status
  - VISION-RAG-IMPLEMENTATION.md - implementation details
  - vision_encoder_deployment_task.md - deployment checklist
  - services/vision-encoder/README.md - deployment guide
  - Updated WARP.md, INFRASTRUCTURE.md, and the Jupyter notebook
- Testing
  - test-vision-encoder.sh - smoke tests (6 tests; see the pytest sketch below)
  - Unit tests for the client, image search, and routing
- Services: 17 total (added Vision Encoder + Qdrant)
- AI Models: 3 (qwen3:8b, OpenCLIP ViT-L/14, BAAI/bge-m3)
- GPU Services: 2 (Vision Encoder, Ollama)
- VRAM Usage: ~10 GB (concurrent)

Status: Production Ready ✅
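A minimal client sketch for the Vision Encoder Service follows. The route names (`/health`, `/embed/text`) and the response key are assumptions for illustration, not confirmed routes; check services/vision-encoder/README.md for the actual API.

```python
# Hypothetical client sketch for the Vision Encoder Service on port 8001.
# Endpoint paths and the "embedding" response key are assumed.
import requests

BASE_URL = "http://localhost:8001"

# Health check before embedding
resp = requests.get(f"{BASE_URL}/health", timeout=5)
resp.raise_for_status()

# Request a 768-dim OpenCLIP ViT-L/14 text embedding
resp = requests.post(
    f"{BASE_URL}/embed/text", json={"text": "a red bicycle"}, timeout=30
)
resp.raise_for_status()
embedding = resp.json()["embedding"]  # assumed response key
assert len(embedding) == 768
```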
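The auto-created collection corresponds to something like the following sketch, written against the official qdrant-client package; the collection name "images" is an assumption.

```python
# Sketch of the 768-dim cosine collection the service creates automatically.
from qdrant_client import QdrantClient
from qdrant_client.models import Distance, VectorParams

client = QdrantClient(url="http://localhost:6333")  # HTTP port

# Create the collection only if it does not exist yet
if not client.collection_exists("images"):
    client.create_collection(
        collection_name="images",
        vectors_config=VectorParams(size=768, distance=Distance.COSINE),
    )
```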
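Text-to-image search combines the two services: embed the query text with the Vision Encoder, then run a nearest-neighbour search in Qdrant. The sketch below carries over the assumed endpoint, response key, and collection name from the sketches above.

```python
# Text-to-image search sketch: CLIP text embedding -> Qdrant vector search.
import requests
from qdrant_client import QdrantClient


def search_images(query: str, top_k: int = 5):
    # 768-dim CLIP text embedding from the Vision Encoder Service
    resp = requests.post(
        "http://localhost:8001/embed/text", json={"text": query}, timeout=30
    )
    resp.raise_for_status()
    vector = resp.json()["embedding"]

    # Cosine-similarity search over stored image embeddings
    client = QdrantClient(url="http://localhost:6333")
    hits = client.search(collection_name="images", query_vector=vector, limit=top_k)
    return [(hit.id, hit.score, hit.payload) for hit in hits]
```

Image-to-image search works the same way, with the query vector coming from the image-embedding endpoint instead.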
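A pytest equivalent of the shell smoke tests might look like this; it relies on the same assumed endpoints as above.

```python
# Pytest sketch mirroring test-vision-encoder.sh (endpoint paths assumed).
import requests

BASE_URL = "http://localhost:8001"


def test_health():
    assert requests.get(f"{BASE_URL}/health", timeout=5).status_code == 200


def test_text_embedding_dim():
    resp = requests.post(
        f"{BASE_URL}/embed/text", json={"text": "smoke test"}, timeout=30
    )
    assert resp.status_code == 200
    assert len(resp.json()["embedding"]) == 768
```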
"""
|
|
Configuration for RAG Service
|
|
"""
|
|
|
|
import os
|
|
from typing import Literal
|
|
from pydantic_settings import BaseSettings
|
|
|
|
|
|
class Settings(BaseSettings):
|
|
"""Application settings"""
|
|
|
|
# Service
|
|
API_HOST: str = "0.0.0.0"
|
|
API_PORT: int = 9500
|
|
|
|
# PostgreSQL + pgvector
|
|
PG_DSN: str = os.getenv(
|
|
"PG_DSN",
|
|
"postgresql+psycopg2://postgres:postgres@city-db:5432/daarion_city"
|
|
)
|
|
|
|
# Embedding model
|
|
EMBED_MODEL_NAME: str = os.getenv("EMBED_MODEL_NAME", "BAAI/bge-m3")
|
|
EMBED_DEVICE: Literal["cuda", "cpu", "mps"] = os.getenv("EMBED_DEVICE", "cpu")
|
|
EMBED_DIM: int = int(os.getenv("EMBED_DIM", "1024")) # BAAI/bge-m3 = 1024
|
|
|
|
# Document Store
|
|
RAG_TABLE_NAME: str = os.getenv("RAG_TABLE_NAME", "rag_documents")
|
|
SEARCH_STRATEGY: Literal["approximate", "exact"] = os.getenv("SEARCH_STRATEGY", "approximate")
|
|
|
|
# Chunking
|
|
CHUNK_SIZE: int = int(os.getenv("CHUNK_SIZE", "500"))
|
|
CHUNK_OVERLAP: int = int(os.getenv("CHUNK_OVERLAP", "50"))
|
|
|
|
# Retrieval
|
|
TOP_K: int = int(os.getenv("TOP_K", "5"))
|
|
|
|
# LLM (for query pipeline)
|
|
LLM_PROVIDER: str = os.getenv("LLM_PROVIDER", "router") # router, openai, local
|
|
ROUTER_BASE_URL: str = os.getenv("ROUTER_BASE_URL", "http://router:9102")
|
|
OPENAI_API_KEY: str = os.getenv("OPENAI_API_KEY", "")
|
|
OPENAI_MODEL: str = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
|
|
|
|
# NATS JetStream configuration
|
|
NATS_URL: str = os.getenv("NATS_URL", "nats://localhost:4222")
|
|
|
|
class Config:
|
|
env_file = ".env"
|
|
case_sensitive = True
|
|
|
|
|
|
settings = Settings()
|
|
|
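A minimal usage sketch, assuming the module above is saved as config.py. One caveat worth noting: the field defaults call os.getenv() at class-definition time, so environment overrides must be in place before the module is first imported; pydantic-settings also picks up values from the .env file named in Config.

```python
# Usage sketch (module name config.py is an assumption).
import os

# Override before import: the os.getenv() defaults are evaluated when the
# module body runs, and settings = Settings() is created at import time.
os.environ["EMBED_DEVICE"] = "cuda"
os.environ["TOP_K"] = "10"

from config import settings

assert settings.EMBED_DEVICE == "cuda"
assert settings.TOP_K == 10  # pydantic coerces the env string to int
print(settings.PG_DSN)
```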