Ollama Runtime:
- Add ollama_client.py for Ollama API integration
- Support for dots-ocr model via Ollama
- Add OLLAMA_BASE_URL configuration
- Update inference.py to support Ollama runtime (RUNTIME_TYPE=ollama)
- Update endpoints to handle async Ollama calls
- Alternative to the local transformers model

RAG Implementation Plan:
- Create TODO-RAG.md with a detailed Haystack integration plan
- Document Store setup (pgvector)
- Embedding model selection
- Ingest pipeline (PARSER → RAG)
- Query pipeline (RAG → LLM)
- Integration with DAGI Router
- Bot commands (/upload_doc, /ask_doc)
- Testing strategy

Now supports three runtime modes:
1. Local transformers (RUNTIME_TYPE=local)
2. Ollama (RUNTIME_TYPE=ollama)
3. Dummy (USE_DUMMY_PARSER=true)
"""
|
|
Configuration for PARSER Service
|
|
"""
|
|
|
|
import os
from typing import Literal, Optional

from pydantic_settings import BaseSettings
class Settings(BaseSettings):
    """Application settings for the PARSER service.

    Values are resolved by pydantic-settings in priority order:
    constructor arguments > environment variables > the ``.env`` file
    > the defaults declared below.  Declaring plain defaults (instead
    of ``os.getenv`` expressions) lets pydantic perform type coercion
    and validation of environment values (e.g. ``int``/``bool``/
    ``Literal`` parsing) rather than raising raw conversion errors at
    import time, and avoids reading the environment twice.
    """

    # --- Service binding ---
    API_HOST: str = "0.0.0.0"
    API_PORT: int = 9400

    # --- PARSER model ---
    PARSER_MODEL_NAME: str = "rednote-hilab/dots.ocr"
    # Inference device; constrained to the values the runtime supports.
    PARSER_DEVICE: Literal["cuda", "cpu", "mps"] = "cpu"
    PARSER_MAX_PAGES: int = 100
    # "WIDTHxHEIGHT" cap for input images.
    PARSER_MAX_RESOLUTION: str = "4096x4096"
    PARSER_BATCH_SIZE: int = 1

    # --- File handling ---
    MAX_FILE_SIZE_MB: int = 50
    TEMP_DIR: str = "/tmp/parser"

    # --- PDF processing ---
    PDF_DPI: int = 200
    # Optional page selection, e.g. "1-20" for pages 1 through 20.
    PAGE_RANGE: Optional[str] = None

    # --- Image processing ---
    # Maximum size (pixels) for the longest image side.
    IMAGE_MAX_SIZE: int = 2048

    # --- Parser mode ---
    # pydantic parses standard boolean strings ("true"/"1"/"yes", any case).
    USE_DUMMY_PARSER: bool = False
    ALLOW_DUMMY_FALLBACK: bool = True

    # --- Runtime selection ---
    RUNTIME_TYPE: Literal["local", "remote", "ollama"] = "local"
    RUNTIME_URL: str = "http://parser-runtime:11435"

    # --- Ollama configuration (used when RUNTIME_TYPE == "ollama") ---
    OLLAMA_BASE_URL: str = "http://localhost:11434"

    class Config:
        env_file = ".env"
        case_sensitive = True
# Module-level singleton: instantiated once at import so the rest of the
# service can simply `from config import settings`.
settings = Settings()