G.2.3 - PDF/Image Support:
- Add preprocessing.py with PDF→images conversion (pdf2image)
- Add image loading and normalization
- Add file type detection and validation
- Support for PDF, PNG, JPEG, WebP, TIFF

G.2.4 - Pre/Post-processing:
- Add postprocessing.py with structured output builders
- build_chunks() — semantic chunks for RAG
- build_qa_pairs() — Q&A extraction
- build_markdown() — Markdown conversion
- Text normalization and chunking logic

G.1.3 - dots.ocr Integration Prep:
- Update model_loader.py with proper error handling
- Add USE_DUMMY_PARSER and ALLOW_DUMMY_FALLBACK flags
- Update inference.py to work with images list
- Add parse_document_from_images() function
- Ready for actual model integration

Configuration:
- Add PDF_DPI, IMAGE_MAX_SIZE, PAGE_RANGE settings
- Add parser mode flags (USE_DUMMY_PARSER, ALLOW_DUMMY_FALLBACK)

API Updates:
- Update endpoints to use new preprocessing pipeline
- Integrate post-processing for all output modes
- Remove temp file handling (work directly with bytes)
94 lines · 2.6 KiB · Python
"""
|
|
Model loader for dots.ocr
|
|
Handles lazy loading and GPU/CPU fallback
|
|
"""
|
|
|
|
import logging
|
|
from typing import Optional, Literal
|
|
from pathlib import Path
|
|
|
|
from app.core.config import settings
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
# Module-level cache for the loaded model; populated lazily by load_model()
# and cleared by unload_model(). None means "not loaded" (or dummy mode).
_model: Optional[object] = None
|
|
|
|
|
|
def load_model() -> Optional[object]:
    """
    Load the dots.ocr model and cache it at module level.

    Subsequent calls return the cached instance without reloading.

    Returns:
        The loaded model instance, or None when dummy-parser mode is
        active, loading is not yet implemented, or loading failed while
        settings.ALLOW_DUMMY_FALLBACK is enabled.

    Raises:
        ImportError: required packages are missing and
            settings.ALLOW_DUMMY_FALLBACK is False.
        Exception: any other loader failure, re-raised when
            settings.ALLOW_DUMMY_FALLBACK is False.
    """
    global _model

    # Fast path: a previous call already populated the cache.
    if _model is not None:
        return _model

    # Dummy-parser mode skips model loading entirely.
    if settings.USE_DUMMY_PARSER:
        logger.info("Dummy parser mode enabled, skipping model loading")
        return None

    logger.info(f"Loading model: {settings.PARSER_MODEL_NAME}")
    logger.info(f"Device: {settings.PARSER_DEVICE}")

    try:
        # TODO: Implement actual model loading.
        # Sketch for dots.ocr (adjust to the real model structure):
        #
        #   from transformers import AutoModelForVision2Seq, AutoProcessor
        #
        #   processor = AutoProcessor.from_pretrained(settings.PARSER_MODEL_NAME)
        #   model = AutoModelForVision2Seq.from_pretrained(
        #       settings.PARSER_MODEL_NAME,
        #       device_map=settings.PARSER_DEVICE if settings.PARSER_DEVICE != "cpu" else None,
        #       torch_dtype=torch.float16 if settings.PARSER_DEVICE != "cpu" else torch.float32,
        #   )
        #   if settings.PARSER_DEVICE == "cpu":
        #       model = model.to("cpu")
        #   _model = {
        #       "model": model,
        #       "processor": processor,
        #       "device": settings.PARSER_DEVICE,
        #   }
        #   logger.info("Model loaded successfully")

        # Until the above is implemented, fall back to the dummy parser.
        logger.warning("Model loading not yet implemented, will use dummy parser")
        _model = None

    except ImportError as e:
        logger.error(f"Required packages not installed: {e}")
        if not settings.ALLOW_DUMMY_FALLBACK:
            raise
        _model = None
    except Exception as e:
        logger.error(f"Failed to load model: {e}", exc_info=True)
        if not settings.ALLOW_DUMMY_FALLBACK:
            raise
        _model = None

    return _model
|
|
|
|
|
|
def get_model() -> Optional[object]:
    """Return the cached model instance, loading it on first use."""
    return _model if _model is not None else load_model()
|
|
|
|
|
|
def unload_model():
    """Drop the cached model instance so its memory can be reclaimed."""
    global _model
    if _model is None:
        return
    # TODO: Proper cleanup (e.g. release GPU memory) before dropping the ref.
    _model = None
    logger.info("Model unloaded")
|
|
|