@@ -35,9 +35,9 @@ name = "SiliconFlow"
base_url = "https://api.siliconflow.cn/v1"
api_key = "your-siliconflow-api-key"
client_type = "openai"
max_retry = 2
max_retry = 3
timeout = 120
retry_interval = 10
retry_interval = 5
[[models]] # 模型(可以配置多个)
@@ -49,11 +49,22 @@ price_out = 8.0 # 输出价格( 用于API调用统计, 单
#force_stream_mode = true # 强制流式输出模式( 若模型不支持非流式输出, 请取消该注释, 启用强制流式输出, 若无该字段, 默认值为false)
[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3"
name = "siliconflow-deepseek-v3"
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"
name = "siliconflow-deepseek-v3.2"
api_provider = "SiliconFlow"
price_in = 2.0
price_out = 8.0
price_out = 3.0
[ models . extra_params ] # 可选的额外参数配置
enable_thinking = false # 不启用思考
[[models]]
model_identifier = "deepseek-ai/DeepSeek-V3.2-Exp"
name = "siliconflow-deepseek-v3.2-think"
api_provider = "SiliconFlow"
price_in = 2.0
price_out = 3.0
[ models . extra_params ] # 可选的额外参数配置
enable_thinking = true # 启用思考
[[models]]
model_identifier = "deepseek-ai/DeepSeek-R1"
@@ -63,15 +74,6 @@ price_in = 4.0
price_out = 16.0
[[models]]
model_identifier = "Qwen/Qwen3-8B"
name = "qwen3-8b"
api_provider = "SiliconFlow"
price_in = 0
price_out = 0
[ models . extra_params ] # 可选的额外参数配置
enable_thinking = false # 不启用思考
[[models]]
model_identifier = "Qwen/Qwen3-30B-A3B-Instruct-2507"
name = "qwen3-30b"
@@ -80,8 +82,8 @@ price_in = 0.7
price_out = 2.8
[[models]]
model_identifier = "Qwen/Qwen2.5-VL-72B-Instruct"
name = "qwen2.5-vl-72b"
model_identifier = "Qwen/Qwen3-VL-30B-A3B-Instruct"
name = "qwen3-vl-30"
api_provider = "SiliconFlow"
price_in = 4.13
price_out = 4.13
@@ -102,12 +104,12 @@ price_out = 0
[ model_task_config . utils ] # 在麦麦的一些组件中使用的模型,例如表情包模块,取名模块,关系模块,麦麦的情绪变化等,是麦麦必须的模型
model_list = [ "siliconflow-deepseek-v3" ] # 使用的模型列表,每个子项对应上面的模型名称(name)
model_list = [ "siliconflow-deepseek-v3.2" ] # 使用的模型列表,每个子项对应上面的模型名称(name)
temperature = 0.2 # 模型温度, 新V3建议0.1-0.3
max_tokens = 2048 # 最大输出token数
[ model_task_config . utils_small ] # 在麦麦的一些组件中使用的小模型,消耗量较大,建议使用速度较快的小模型
model_list = [ "qwen3-8b" , "qwen3-30b" ]
model_list = [ "qwen3-30b" ]
temperature = 0.7
max_tokens = 2048
@@ -117,17 +119,17 @@ temperature = 0.7
max_tokens = 800
[ model_task_config . replyer ] # 首要回复模型,还用于表达器和表达方式学习
model_list = [ "siliconflow-deepseek-v3" , "siliconflow-deepseek-r1" ]
model_list = [ "siliconflow-deepseek-v3.2-think" , "siliconflow-deepseek-r1" , "siliconflow-deepseek-v3.2" ]
temperature = 0.3 # 模型温度, 新V3建议0.1-0.3
max_tokens = 800
[ model_task_config . planner ] #决策:负责决定麦麦该什么时候回复的模型
model_list = [ "siliconflow-deepseek-v3" ]
model_list = [ "siliconflow-deepseek-v3.2" ]
temperature = 0.3
max_tokens = 800
[ model_task_config . vlm ] # 图像识别模型
model_list = [ "qwen2.5-vl-72b" ]
model_list = [ "qwen3-vl-30" ]
max_tokens = 256
[ model_task_config . voice ] # 语音识别模型
@@ -140,16 +142,16 @@ model_list = ["bge-m3"]
#------------LPMM知识库模型------------
[ model_task_config . lpmm_entity_extract ] # 实体提取模型
model_list = [ "siliconflow-deepseek-v3" ]
model_list = [ "siliconflow-deepseek-v3.2" ]
temperature = 0.2
max_tokens = 800
[ model_task_config . lpmm_rdf_build ] # RDF构建模型
model_list = [ "siliconflow-deepseek-v3" ]
model_list = [ "siliconflow-deepseek-v3.2" ]
temperature = 0.2
max_tokens = 800
[ model_task_config . lpmm_qa ] # 问答模型
model_list = [ "qwen3-30b" ]
model_list = [ "siliconflow-deepseek-v3.2" ]
temperature = 0.7
max_tokens = 800