feat:表达方式主动提问
This commit is contained in:
@@ -1,5 +1,5 @@
|
||||
[inner]
|
||||
version = "6.21.8"
|
||||
version = "6.23.0"
|
||||
|
||||
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
|
||||
#如果你想要修改配置文件,请递增version的值
|
||||
@@ -80,6 +80,9 @@ expression_groups = [
|
||||
# 注意:如果为群聊,则需要设置为group,如果设置为私聊,则需要设置为private
|
||||
]
|
||||
|
||||
reflect = false # 是否启用表达反思(Bot主动向管理员询问表达方式是否合适)
|
||||
reflect_operator_id = "" # 表达反思操作员ID,格式:platform:id:type (例如 "qq:123456:private" 或 "qq:654321:group")
|
||||
|
||||
|
||||
[chat] #麦麦的聊天设置
|
||||
talk_value = 1 #聊天频率,越小越沉默,范围0-1
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
[inner]
|
||||
version = "1.7.8"
|
||||
version = "1.8.0"
|
||||
|
||||
# 配置文件版本号迭代规则同bot_config.toml
|
||||
|
||||
@@ -66,6 +66,32 @@ price_out = 3.0
|
||||
[models.extra_params] # 可选的额外参数配置
|
||||
enable_thinking = true # 启用思考
|
||||
|
||||
[[models]]
|
||||
model_identifier = "Qwen/Qwen3-Next-80B-A3B-Instruct"
|
||||
name = "qwen3-next-80b"
|
||||
api_provider = "SiliconFlow"
|
||||
price_in = 1.0
|
||||
price_out = 4.0
|
||||
|
||||
[[models]]
|
||||
model_identifier = "zai-org/GLM-4.6"
|
||||
name = "glm-4.6"
|
||||
api_provider = "SiliconFlow"
|
||||
price_in = 3.5
|
||||
price_out = 14.0
|
||||
[models.extra_params] # 可选的额外参数配置
|
||||
enable_thinking = false # 不启用思考
|
||||
|
||||
[[models]]
|
||||
model_identifier = "zai-org/GLM-4.6"
|
||||
name = "glm-4.6-think"
|
||||
api_provider = "SiliconFlow"
|
||||
price_in = 3.5
|
||||
price_out = 14.0
|
||||
[models.extra_params] # 可选的额外参数配置
|
||||
enable_thinking = true # 启用思考
|
||||
|
||||
|
||||
[[models]]
|
||||
model_identifier = "deepseek-ai/DeepSeek-R1"
|
||||
name = "siliconflow-deepseek-r1"
|
||||
@@ -120,7 +146,7 @@ temperature = 0.7
|
||||
max_tokens = 800
|
||||
|
||||
[model_task_config.replyer] # 首要回复模型,还用于表达器和表达方式学习
|
||||
model_list = ["siliconflow-deepseek-v3.2-think","siliconflow-glm-4.6-think","siliconflow-glm-4.6"]
|
||||
model_list = ["siliconflow-deepseek-v3.2","siliconflow-deepseek-v3.2-think","siliconflow-glm-4.6","siliconflow-glm-4.6-think"]
|
||||
temperature = 0.3 # 模型温度,新V3建议0.1-0.3
|
||||
max_tokens = 2048
|
||||
|
||||
|
||||
Reference in New Issue
Block a user