Files
microdao-daarion/services/parser-service/app/core/config.py
Apple 4601c6fca8 feat: add Vision Encoder service + Vision RAG implementation
- Vision Encoder Service (OpenCLIP ViT-L/14, GPU-accelerated)
  - FastAPI app with text/image embedding endpoints (768-dim)
  - Docker support with NVIDIA GPU runtime
  - Port 8001, health checks, model info API

- Qdrant Vector Database integration
  - Port 6333/6334 (HTTP/gRPC)
  - Image embeddings storage (768-dim, Cosine distance)
  - Auto collection creation

- Vision RAG implementation
  - VisionEncoderClient (Python client for API)
  - Image Search module (text-to-image, image-to-image)
  - Vision RAG routing in DAGI Router (mode: image_search)
  - VisionEncoderProvider integration

- Documentation (5000+ lines)
  - SYSTEM-INVENTORY.md - Complete system inventory
  - VISION-ENCODER-STATUS.md - Service status
  - VISION-RAG-IMPLEMENTATION.md - Implementation details
  - vision_encoder_deployment_task.md - Deployment checklist
  - services/vision-encoder/README.md - Deployment guide
  - Updated WARP.md, INFRASTRUCTURE.md, Jupyter Notebook

- Testing
  - test-vision-encoder.sh - Smoke tests (6 tests)
  - Unit tests for client, image search, routing

- Services: 17 total (added Vision Encoder + Qdrant)
- AI Models: 3 (qwen3:8b, OpenCLIP ViT-L/14, BAAI/bge-m3)
- GPU Services: 2 (Vision Encoder, Ollama)
- VRAM Usage: ~10 GB (concurrent)

Status: Production Ready 
2025-11-17 05:24:36 -08:00

64 lines
2.3 KiB
Python

"""
Configuration for PARSER Service
"""
import os
from typing import Literal, Optional
from pydantic_settings import BaseSettings
class Settings(BaseSettings):
    """Application settings for the PARSER service.

    Every value is resolved from environment variables at import time via
    ``os.getenv`` (with explicit fallback chains), so the environment wins
    over the ``.env`` file declared in ``Config``; pydantic only fills in
    fields the environment did not pin.
    """

    # --- Service binding ---
    API_HOST: str = "0.0.0.0"
    API_PORT: int = 9400

    # --- PARSER model ---
    # PARSER_MODEL_NAME falls back to the legacy DOTS_OCR_MODEL_ID variable
    # before using the built-in default.
    PARSER_MODEL_NAME: str = os.getenv("PARSER_MODEL_NAME", os.getenv("DOTS_OCR_MODEL_ID", "rednote-hilab/dots.ocr"))
    # PARSER_DEVICE falls back to the generic DEVICE variable.
    # NOTE(review): a value outside {"cuda", "cpu", "mps"} would NOT be
    # rejected here — pydantic does not validate field defaults by default,
    # and os.getenv bypasses coercion entirely. Confirm whether strict
    # validation is wanted (e.g. Field(validate_default=True)).
    PARSER_DEVICE: Literal["cuda", "cpu", "mps"] = os.getenv("PARSER_DEVICE", os.getenv("DEVICE", "cpu"))
    PARSER_MAX_PAGES: int = int(os.getenv("PARSER_MAX_PAGES", "100"))
    PARSER_MAX_RESOLUTION: str = os.getenv("PARSER_MAX_RESOLUTION", "4096x4096")
    PARSER_BATCH_SIZE: int = int(os.getenv("PARSER_BATCH_SIZE", "1"))

    # --- File handling ---
    MAX_FILE_SIZE_MB: int = int(os.getenv("MAX_FILE_SIZE_MB", "50"))
    TEMP_DIR: str = os.getenv("TEMP_DIR", "/tmp/parser")

    # --- PDF processing ---
    PDF_DPI: int = int(os.getenv("PDF_DPI", "200"))
    # e.g. "1-20" for pages 1-20; None means all pages
    PAGE_RANGE: Optional[str] = os.getenv("PAGE_RANGE", None)

    # --- Image processing ---
    # Max size (pixels) for the longest side of an input image
    IMAGE_MAX_SIZE: int = int(os.getenv("IMAGE_MAX_SIZE", "2048"))

    # --- Parser mode ---
    # Boolean env flags: only the literal (case-insensitive) string "true" enables them
    USE_DUMMY_PARSER: bool = os.getenv("USE_DUMMY_PARSER", "false").lower() == "true"
    ALLOW_DUMMY_FALLBACK: bool = os.getenv("ALLOW_DUMMY_FALLBACK", "true").lower() == "true"

    # --- Runtime backend selection ---
    RUNTIME_TYPE: Literal["local", "remote", "ollama"] = os.getenv("RUNTIME_TYPE", "local")
    RUNTIME_URL: str = os.getenv("RUNTIME_URL", "http://parser-runtime:11435")

    # --- Ollama configuration (used when RUNTIME_TYPE=ollama) ---
    OLLAMA_BASE_URL: str = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434")

    # --- DAGI Router configuration (for qa_pairs 2-stage pipeline) ---
    ROUTER_BASE_URL: str = os.getenv("ROUTER_BASE_URL", "http://router:9102")
    ROUTER_TIMEOUT: int = int(os.getenv("ROUTER_TIMEOUT", "60"))  # seconds — TODO confirm unit

    # --- RAG Service configuration (for ingest pipeline) ---
    RAG_BASE_URL: str = os.getenv("RAG_BASE_URL", "http://rag-service:9500")
    RAG_TIMEOUT: int = int(os.getenv("RAG_TIMEOUT", "120"))  # seconds — TODO confirm unit

    # --- NATS JetStream configuration ---
    NATS_URL: str = os.getenv("NATS_URL", "nats://localhost:4222")

    class Config:
        # .env is a fallback only: os.getenv above already resolved any
        # variable present in the process environment.
        env_file = ".env"
        case_sensitive = True
settings = Settings()