feat:模型支持高级配置

This commit is contained in:
SengokuCola
2026-05-04 13:50:53 +08:00
parent 09f6a89b98
commit c5cd47adc2
8 changed files with 46 additions and 41 deletions

View File

@@ -53,6 +53,7 @@ export interface PackTaskConfigs {
utils_small?: PackTaskConfig
tool_use?: PackTaskConfig
replyer?: PackTaskConfig
learner?: PackTaskConfig
planner?: PackTaskConfig
vlm?: PackTaskConfig
voice?: PackTaskConfig

View File

@@ -106,6 +106,7 @@ function ModelConfigPageContent() {
const [jumpToPage, setJumpToPage] = useState('')
const [advancedTemperatureMode, setAdvancedTemperatureMode] = useState(false)
const [advancedTaskSettingsVisible, setAdvancedTaskSettingsVisible] = useState(false)

// 模型 Combobox 状态
const [modelComboboxOpen, setModelComboboxOpen] = useState(false)
@@ -155,7 +156,9 @@ function ModelConfigPageContent() {
// 检查是否有模型
if (!task.model_list || task.model_list.length === 0) {
  if (key !== 'learner') {
    emptyTaskList.push(key)
  }
  continue
}
@@ -939,14 +942,26 @@ function ModelConfigPageContent() {
{/* 模型任务配置标签页 */}
<TabsContent value="tasks" className="space-y-6 mt-0">
  <div className="flex flex-col gap-3 sm:flex-row sm:items-center sm:justify-between">
    <p className="text-sm text-muted-foreground">
      使
    </p>
{taskConfigSchema?.fields.some((field) => field.advanced) && (
<Button
type="button"
variant={advancedTaskSettingsVisible ? 'secondary' : 'outline'}
size="sm"
onClick={() => setAdvancedTaskSettingsVisible((current) => !current)}
>
</Button>
)}
</div>
{taskConfig && taskConfigSchema && (
  <div className="grid gap-4 sm:gap-6">
    {taskConfigSchema.fields
      .filter(f => f.type === 'object' && (advancedTaskSettingsVisible || !f.advanced))
      .map((field, index) => {
        const desc = field.description || field.name
        const commaIdx = desc.search(/[,]/)

View File

@@ -59,7 +59,7 @@ LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
A_MEMORIX_LEGACY_CONFIG_PATH: Path = (CONFIG_DIR / "a_memorix.toml").resolve().absolute()

MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.10.1"
MODEL_CONFIG_VERSION: str = "1.14.8"

logger = get_logger("config")

View File

@@ -32,6 +32,13 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 120.0,
"selection_strategy": "random",
},
"learner": {
"model_list": [],
"max_tokens": 4096,
"temperature": 0.5,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"planner": {
"model_list": ["deepseek-v4-flash"],
"max_tokens": 8000,
@@ -39,13 +46,6 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 12.0,
"selection_strategy": "random",
},
"vlm": {
"model_list": ["qwen3.5-flash"],
"max_tokens": 512,
"temperature": 0.3,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"voice": {
"model_list": [""],
"max_tokens": 1024,
@@ -53,13 +53,6 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"slow_threshold": 12.0,
"selection_strategy": "random",
},
"embedding": {
"model_list": ["qwen3-embedding"],
"max_tokens": 1024,
"temperature": 0.3,
"slow_threshold": 5.0,
"selection_strategy": "random",
},
}

DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
@@ -89,24 +82,6 @@ DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
"price_out": 2.0,
"visual": False,
"extra_params": {"enable_thinking": "false"},
},
{
"model_identifier": "qwen3.5-flash",
"name": "qwen3.5-flash",
"api_provider": "BaiLian",
"price_in": 0.2,
"price_out": 2.0,
"visual": True,
"extra_params": {"enable_thinking": "false"},
},
{
"model_identifier": "text-embedding-v4",
"name": "qwen3-embedding",
"api_provider": "BaiLian",
"price_in": 0.5,
"price_out": 0.5,
"visual": False,
"extra_params": {},
}
]

View File

@@ -436,8 +436,18 @@ class ModelTaskConfig(ConfigBase):
"x-icon": "message-square",
},
)
"""首要回复模型配置"""
learner: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={
"x-widget": "custom",
"x-icon": "graduation-cap",
"advanced": True,
},
)
"""学习模型配置,用于表达方式学习和黑话学习;留空时自动继用 utils 模型"""
planner: TaskConfig = Field(
default_factory=TaskConfig,
json_schema_extra={

View File

@@ -31,7 +31,7 @@ if TYPE_CHECKING:
logger = get_logger("expressor")

express_learn_model = LLMServiceClient(
task_name="learner", request_type="expression.learner"
)
summary_model = LLMServiceClient(task_name="utils", request_type="expression.summary")

View File

@@ -23,7 +23,7 @@ from .expression_utils import is_single_char_jargon
logger = get_logger("jargon")

llm_inference = LLMServiceClient(task_name="learner", request_type="jargon.inference")

class JargonEntry(TypedDict):

View File

@@ -111,6 +111,10 @@ class LLMOrchestrator:
task_config = getattr(model_task_config, self.task_name, None)
if not isinstance(task_config, TaskConfig):
raise ValueError(f"未找到名为 '{self.task_name}' 的任务配置")
if self.task_name == "learner" and not any(str(model_name).strip() for model_name in task_config.model_list):
fallback_task_config = getattr(model_task_config, "utils", None)
if isinstance(fallback_task_config, TaskConfig):
return fallback_task_config
return task_config

def _refresh_task_config(self) -> TaskConfig: