feat:模型支持高级配置

This commit is contained in:
SengokuCola
2026-05-04 13:50:53 +08:00
parent 09f6a89b98
commit c5cd47adc2
8 changed files with 46 additions and 41 deletions

View File

@@ -59,7 +59,7 @@ LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.10.1"
MODEL_CONFIG_VERSION: str = "1.14.6"
MODEL_CONFIG_VERSION: str = "1.14.8"
logger = get_logger("config")

View File

@@ -32,6 +32,13 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 120.0,
"selection_strategy": "random",
},
"learner": {
"model_list": [],
"max_tokens": 4096,
"temperature": 0.5,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"planner": {
"model_list": ["deepseek-v4-flash"],
"max_tokens": 8000,
@@ -39,13 +46,6 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 12.0,
"selection_strategy": "random",
},
"vlm": {
"model_list": ["qwen3.5-flash"],
"max_tokens": 512,
"temperature": 0.3,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"voice": {
"model_list": [""],
"max_tokens": 1024,
@@ -53,13 +53,6 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 12.0,
"selection_strategy": "random",
},
"embedding": {
"model_list": ["qwen3-embedding"],
"max_tokens": 1024,
"temperature": 0.3,
"slow_threshold": 5.0,
"selection_strategy": "random",
},
}
DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
@@ -89,24 +82,6 @@ DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
"price_out": 2.0,
"visual": False,
"extra_params": {"enable_thinking": "false"},
},
{
"model_identifier": "qwen3.5-flash",
"name": "qwen3.5-flash",
"api_provider": "BaiLian",
"price_in": 0.2,
"price_out": 2.0,
"visual": True,
"extra_params": {"enable_thinking": "false"},
},
{
"model_identifier": "text-embedding-v4",
"name": "qwen3-embedding",
"api_provider": "BaiLian",
"price_in": 0.5,
"price_out": 0.5,
"visual": False,
"extra_params": {},
}
]

View File

@@ -436,8 +436,18 @@ class ModelTaskConfig(ConfigBase):
"x-icon": "message-square",
},
)
"""首要回复模型配置, 还用于表达器和表达方式学习"""
"""首要回复模型配置"""
learner: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={
"x-widget": "custom",
"x-icon": "graduation-cap",
"advanced": True,
},
)
"""学习模型配置,用于表达方式学习和黑话学习;留空时自动沿用 utils 模型"""
planner: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={

View File

@@ -31,7 +31,7 @@ if TYPE_CHECKING:
logger = get_logger("expressor")
express_learn_model = LLMServiceClient(
task_name="replyer", request_type="expression.learner"
task_name="learner", request_type="expression.learner"
)
summary_model = LLMServiceClient(task_name="utils", request_type="expression.summary")

View File

@@ -23,7 +23,7 @@ from .expression_utils import is_single_char_jargon
logger = get_logger("jargon")
llm_inference = LLMServiceClient(task_name="utils", request_type="jargon.inference")
llm_inference = LLMServiceClient(task_name="learner", request_type="jargon.inference")
class JargonEntry(TypedDict):

View File

@@ -111,6 +111,10 @@ class LLMOrchestrator:
task_config = getattr(model_task_config, self.task_name, None)
if not isinstance(task_config, TaskConfig):
raise ValueError(f"未找到名为 '{self.task_name}' 的任务配置")
if self.task_name == "learner" and not any(str(model_name).strip() for model_name in task_config.model_list):
fallback_task_config = getattr(model_task_config, "utils", None)
if isinstance(fallback_task_config, TaskConfig):
return fallback_task_config
return task_config
def _refresh_task_config(self) -> TaskConfig: