Remove LLMConfig, pass LLM settings directly to LLMClient

LLMConfig was an unnecessary intermediary — LLMClient now takes
model, host, and port directly as constructor args.

https://claude.ai/code/session_01AKXQBuVBsW7J1YbukDiQ7A
This commit is contained in:
Claude
2026-03-08 20:52:45 +00:00
committed by Richie Cahill
parent ab2d8dbd51
commit f11c9bed58
4 changed files with 16 additions and 38 deletions

View File

@@ -4,29 +4,27 @@ from __future__ import annotations
import base64 import base64
import logging import logging
from typing import TYPE_CHECKING, Any from typing import Any
import httpx import httpx
if TYPE_CHECKING:
from python.signal_bot.models import LLMConfig
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
class LLMClient: class LLMClient:
"""Talk to an ollama instance. """Talk to an ollama instance.
Designed to be swappable — change ``config.model`` to try a new LLM
without touching any calling code.
Args: Args:
config: LLM connection and model configuration. model: Ollama model name.
host: Ollama host.
port: Ollama port.
temperature: Sampling temperature.
""" """
def __init__(self, config: LLMConfig) -> None: def __init__(self, model: str, host: str, port: int = 11434, *, temperature: float = 0.1) -> None:
self.config = config self.model = model
self._client = httpx.Client(base_url=config.base_url, timeout=120) self.temperature = temperature
self._client = httpx.Client(base_url=f"http://{host}:{port}", timeout=120)
def chat(self, prompt: str, *, system: str = "") -> str: def chat(self, prompt: str, *, system: str = "") -> str:
"""Send a text prompt and return the response.""" """Send a text prompt and return the response."""
@@ -51,12 +49,12 @@ class LLMClient:
def _generate(self, messages: list[dict[str, Any]]) -> str: def _generate(self, messages: list[dict[str, Any]]) -> str:
"""Call the ollama chat API.""" """Call the ollama chat API."""
payload = { payload = {
"model": self.config.model, "model": self.model,
"messages": messages, "messages": messages,
"stream": False, "stream": False,
"options": {"temperature": self.config.temperature}, "options": {"temperature": self.temperature},
} }
logger.info(f"LLM request to {self.config.model}") logger.info(f"LLM request to {self.model}")
response = self._client.post("/api/chat", json=payload) response = self._client.post("/api/chat", json=payload)
response.raise_for_status() response.raise_for_status()
data = response.json() data = response.json()

View File

@@ -13,7 +13,7 @@ from python.common import configure_logger
from python.signal_bot.commands.inventory import handle_inventory_update from python.signal_bot.commands.inventory import handle_inventory_update
from python.signal_bot.device_registry import DeviceRegistry from python.signal_bot.device_registry import DeviceRegistry
from python.signal_bot.llm_client import LLMClient from python.signal_bot.llm_client import LLMClient
from python.signal_bot.models import BotConfig, LLMConfig, SignalMessage from python.signal_bot.models import BotConfig, SignalMessage
from python.signal_bot.signal_client import SignalClient from python.signal_bot.signal_client import SignalClient
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@@ -70,7 +70,7 @@ def dispatch(
device_count = len(registry.list_devices()) device_count = len(registry.list_devices())
signal.reply( signal.reply(
message, message,
f"Bot online.\nLLM: {llm.config.model}\nAvailable models: {model_list}\nKnown devices: {device_count}", f"Bot online.\nLLM: {llm.model}\nAvailable models: {model_list}\nKnown devices: {device_count}",
) )
elif cmd == "inventory" or (message.attachments and not text.startswith(CMD_PREFIX)): elif cmd == "inventory" or (message.attachments and not text.startswith(CMD_PREFIX)):
@@ -114,7 +114,6 @@ def main(
"""Run the Signal command and control bot.""" """Run the Signal command and control bot."""
configure_logger(log_level) configure_logger(log_level)
llm_config = LLMConfig(model=llm_model, host=llm_host, port=llm_port)
config = BotConfig( config = BotConfig(
signal_api_url=signal_api_url, signal_api_url=signal_api_url,
phone_number=phone_number, phone_number=phone_number,
@@ -122,7 +121,7 @@ def main(
) )
signal = SignalClient(config.signal_api_url, config.phone_number) signal = SignalClient(config.signal_api_url, config.phone_number)
llm = LLMClient(llm_config) llm = LLMClient(model=llm_model, host=llm_host, port=llm_port)
registry = DeviceRegistry(signal, Path(registry_file)) registry = DeviceRegistry(signal, Path(registry_file))
try: try:

View File

@@ -62,20 +62,6 @@ class InventoryUpdate(BaseModel):
source_type: str = "" # "receipt_photo" or "text_list" source_type: str = "" # "receipt_photo" or "text_list"
class LLMConfig(BaseModel):
    """Configuration for an LLM backend.

    Groups the Ollama connection settings (host/port) with the model
    selection and sampling temperature consumed by the client.
    """

    # Ollama model identifier (e.g. "test:7b" as used in the tests).
    model: str
    # Hostname of the Ollama server; scheme is always http (see base_url).
    host: str
    # Ollama's default API port.
    port: int = 11434
    # Sampling temperature; low default presumably favors deterministic
    # replies — TODO confirm intended behavior with callers.
    temperature: float = 0.1

    @property
    def base_url(self) -> str:
        """Ollama API base URL, e.g. "http://bob.local:11434"."""
        return f"http://{self.host}:{self.port}"
class BotConfig(BaseModel): class BotConfig(BaseModel):
"""Top-level bot configuration.""" """Top-level bot configuration."""

View File

@@ -19,7 +19,6 @@ from python.signal_bot.llm_client import LLMClient
from python.signal_bot.main import dispatch from python.signal_bot.main import dispatch
from python.signal_bot.models import ( from python.signal_bot.models import (
InventoryItem, InventoryItem,
LLMConfig,
SignalMessage, SignalMessage,
TrustLevel, TrustLevel,
) )
@@ -27,10 +26,6 @@ from python.signal_bot.signal_client import SignalClient
class TestModels: class TestModels:
def test_llm_config_base_url(self):
    """base_url must join host and port into an http:// URL."""
    config = LLMConfig(model="test:7b", host="bob.local", port=11434)
    assert config.base_url == "http://bob.local:11434"
def test_trust_level_values(self): def test_trust_level_values(self):
assert TrustLevel.VERIFIED == "verified" assert TrustLevel.VERIFIED == "verified"
assert TrustLevel.UNVERIFIED == "unverified" assert TrustLevel.UNVERIFIED == "unverified"
@@ -201,7 +196,7 @@ class TestDispatch:
def test_status_command(self, signal_mock, llm_mock, registry_mock, tmp_path): def test_status_command(self, signal_mock, llm_mock, registry_mock, tmp_path):
llm_mock.list_models.return_value = ["model1", "model2"] llm_mock.list_models.return_value = ["model1", "model2"]
llm_mock.config = LLMConfig(model="test:7b", host="bob") llm_mock.model = "test:7b"
registry_mock.list_devices.return_value = [] registry_mock.list_devices.return_value = []
msg = SignalMessage(source="+1234", timestamp=0, message="!status") msg = SignalMessage(source="+1234", timestamp=0, message="!status")
dispatch(msg, signal_mock, llm_mock, registry_mock, tmp_path / "inv.json") dispatch(msg, signal_mock, llm_mock, registry_mock, tmp_path / "inv.json")