Files
microdao-daarion/services/llm-proxy/providers/base_provider.py
Apple fca48b3eb0 feat(node2): Complete NODE2 setup - guardian, agents, swapper models
- Node-guardian running on MacBook and updating metrics
- NODE2 agents (Atlas, Greeter, Oracle, Builder Bot) assigned to node-2-macbook-m4max
- Swapper models displaying correctly (8 models)
- DAGI Router agents showing with correct status (3 active, 1 stale)
- Router health check using node_cache for remote nodes
2025-12-02 07:07:58 -08:00

40 lines
945 B
Python

from abc import ABC, abstractmethod
from typing import Protocol
from models import ChatMessage, LLMResponse
class BaseProvider(Protocol):
    """Structural interface that every LLM provider implementation satisfies.

    Callers program against this protocol rather than a concrete class:
    any object exposing a matching async ``chat`` method is a valid
    provider. The ``@abstractmethod`` marker additionally blocks direct
    instantiation of explicit subclasses that forget to implement it.
    """

    @abstractmethod
    async def chat(
        self,
        messages: list[ChatMessage],
        model_name: str,
        max_tokens: int | None = None,
        temperature: float = 0.7,
        top_p: float = 1.0,
        **kwargs
    ) -> LLMResponse:
        """Send a chat-completion request to the underlying LLM provider.

        Args:
            messages: Ordered list of chat messages forming the conversation.
            model_name: Physical model name understood by this provider.
            max_tokens: Maximum number of tokens to generate, if capped.
            temperature: Sampling temperature.
            top_p: Nucleus (top-p) sampling parameter.
            **kwargs: Additional provider-specific parameters, passed through.

        Returns:
            LLMResponse with the generated content, usage, and metadata.
        """
        ...