refactor(llm): enable hot-reload for model config and client runtime
- make LLM task config resolution dynamic in LLMRequest
- load model clients on demand from the latest config
- clear the client instance cache on config reload
- remove stale module-level model_config usage in llm_api
- add hot-reload tests for the LLM/config-watcher flow
This commit is contained in:
@@ -1,8 +1,28 @@
|
||||
from src.config.config import model_config
|
||||
from importlib import import_module
|
||||
|
||||
used_client_types = {provider.client_type for provider in model_config.api_providers}
|
||||
from src.config.config import config_manager
|
||||
|
||||
if "openai" in used_client_types:
|
||||
from . import openai_client # noqa: F401
|
||||
if "gemini" in used_client_types:
|
||||
from . import gemini_client # noqa: F401
|
||||
# Maps a provider's ``client_type`` string to the relative module path
# (within this package) that implements that client.
_CLIENT_MODULE_BY_TYPE: dict[str, str] = {
    "openai": ".openai_client",
    "gemini": ".gemini_client",
}

# Client types whose module has already been imported in this process;
# used as a fast-path guard so repeated calls skip import_module entirely.
_LOADED_CLIENT_TYPES: set[str] = set()
|
||||
|
||||
|
||||
def ensure_client_type_loaded(client_type: str) -> None:
    """Import the client module backing ``client_type``, at most once.

    Already-loaded types return immediately; client types with no entry in
    ``_CLIENT_MODULE_BY_TYPE`` are silently ignored.
    """
    if client_type in _LOADED_CLIENT_TYPES:
        # Fast path: this type's module was imported earlier in the process.
        return
    target_module = _CLIENT_MODULE_BY_TYPE.get(client_type)
    if target_module:
        # Relative import anchored at this package.
        import_module(target_module, package=__name__)
        _LOADED_CLIENT_TYPES.add(client_type)
|
||||
|
||||
|
||||
def ensure_configured_clients_loaded() -> None:
    """Load a client module for every provider in the current model config.

    Reads the latest config from ``config_manager`` on each call, so newly
    configured client types are picked up after a config reload.
    """
    providers = config_manager.get_model_config().api_providers
    for api_provider in providers:
        ensure_client_type_loaded(api_provider.client_type)
|
||||
|
||||
|
||||
# Eagerly register clients for the providers configured at import time.
# NOTE(review): deliberate import-time side effect — presumably required so
# client classes are registered before the first request; confirm callers
# rely on this rather than calling ensure_configured_clients_loaded() lazily.
ensure_configured_clients_loaded()
|
||||
|
||||
Reference in New Issue
Block a user