update:升级表达方式0.1

This commit is contained in:
SengokuCola
2025-09-26 12:50:50 +08:00
parent 3a2685cf26
commit e79da24c23
4 changed files with 69 additions and 2 deletions

View File

@@ -55,6 +55,31 @@ def init_prompt() -> None:
"""
Prompt(learn_style_prompt, "learn_style_prompt")
# Prompt template: asks the LLM to map each learnt expression pair back to
# its original sentence ("context") in the chat log, emitting one flat JSON
# object per matched pair; pairs with no locatable source sentence are omitted.
match_expression_context_prompt = """
**聊天内容**
{chat_str}
**从聊天内容总结的表达方式pairs**
{expression_pairs}
请你为上面的每一条表达方式,找到该表达方式的原文句子,并输出匹配结果。
如果找不到原句,就不输出该句的匹配结果。
以json格式输出
格式如下:
{{
"expression_pair": "表达方式pair的序号数字",
"context": "与表达方式对应的原文句子的原始内容,不要修改原文句子的内容",
}}
{{
"expression_pair": "表达方式pair的序号数字",
"context": "与表达方式对应的原文句子的原始内容,不要修改原文句子的内容",
}}
...
现在请你输出匹配结果:
"""
# Registered under the name looked up by the context-matching step.
Prompt(match_expression_context_prompt, "match_expression_context_prompt")
class ExpressionLearner:
def __init__(self, chat_id: str) -> None:
@@ -261,6 +286,28 @@ class ExpressionLearner:
expr.delete_instance()
return learnt_expressions
async def match_expression_context(
    self, expression_pairs: List[Tuple[str, str]], random_msg_match_str: str
) -> List[Tuple[str, str, str]]:
    """Match each learnt expression pair to its source sentence in the chat log.

    Uses the LLM (via "match_expression_context_prompt") to locate, for every
    (situation, style) pair, the original sentence it was derived from.

    Args:
        expression_pairs: learnt (situation, style) pairs.
        random_msg_match_str: plain chat text the pairs were learnt from.

    Returns:
        (situation, style, context) triples; pairs whose source sentence the
        model could not locate (or whose response entry is malformed) are
        dropped.
    """
    # Number the pairs so the model can reference them by index — the prompt
    # template asks for "表达方式pair的序号数字".
    pairs_str = "\n".join(
        f"{i}. {situation} -> {style}"
        for i, (situation, style) in enumerate(expression_pairs, start=1)
    )
    prompt = await global_prompt_manager.format_prompt(
        "match_expression_context_prompt",
        expression_pairs=pairs_str,
        chat_str=random_msg_match_str,
    )
    # Bug fix: the model must be called BEFORE parsing — the original code
    # iterated over an always-empty match_responses list and never returned.
    response, _ = await self.express_learn_model.generate_response_async(prompt, temperature=0.3)

    matched_expressions: List[Tuple[str, str, str]] = []
    for match_response in self._parse_match_responses(response):
        try:
            # 1-based pair index, as requested in the prompt template.
            idx = int(str(match_response["expression_pair"]).strip()) - 1
            context = str(match_response["context"])
        except (KeyError, ValueError, TypeError):
            continue  # malformed entry from the model — skip it
        if 0 <= idx < len(expression_pairs):
            situation, style = expression_pairs[idx]
            matched_expressions.append((situation, style, context))
    return matched_expressions

@staticmethod
def _parse_match_responses(response: str) -> List[dict]:
    """Extract every flat JSON object from the raw model response text."""
    # Local imports keep this patch self-contained; the file's import block
    # is not visible from this diff.
    import json
    import re

    results: List[dict] = []
    # The prompt asks for a sequence of flat objects, so non-nested {...}
    # fragments are sufficient to capture each one.
    for fragment in re.findall(r"\{[^{}]*\}", response):
        try:
            obj = json.loads(fragment)
        except json.JSONDecodeError:
            continue  # ignore fragments that are not valid JSON
        if isinstance(obj, dict):
            results.append(obj)
    return results
async def learn_expression(self, num: int = 10) -> Optional[Tuple[List[Tuple[str, str, str]], str]]:
"""从指定聊天流学习表达方式
@@ -286,6 +333,7 @@ class ExpressionLearner:
chat_id: str = random_msg[0].chat_id
# random_msg_str: str = build_readable_messages(random_msg, timestamp_mode="normal")
random_msg_str: str = await build_anonymous_messages(random_msg)
random_msg_match_str: str = await build_bare_messages(random_msg)
prompt: str = await global_prompt_manager.format_prompt(
@@ -306,7 +354,12 @@ class ExpressionLearner:
expressions: List[Tuple[str, str, str]] = self.parse_expression_response(response, chat_id)
return expressions, chat_id
matched_expressions = await self.match_expression_context(expressions, random_msg_match_str)
return matched_expressions, chat_id
def parse_expression_response(self, response: str, chat_id: str) -> List[Tuple[str, str, str]]:
"""

View File

@@ -302,6 +302,13 @@ class Expression(BaseModel):
# Situation/context description the expression applies to.
situation = TextField()
# The learnt expression (style/phrasing) text itself.
style = TextField()
# Learning/usage count; float — presumably so it can decay fractionally,
# TODO confirm against the learner's update logic.
count = FloatField()
# new mode fields
# Fields backing the "context" / "full-context" expression modes; nullable
# so rows created before this schema change remain valid.
context = TextField(null=True)
context_words = TextField(null=True)
full_context = TextField(null=True)
# Embedding of full_context stored as text — presumably serialized (e.g.
# JSON); verify against the code that writes it.
full_context_embedding = TextField(null=True)
# Timestamp of last use (epoch seconds — TODO confirm units).
last_active_time = FloatField()
# Owning chat stream; indexed for per-chat lookups.
chat_id = TextField(index=True)
type = TextField()

View File

@@ -106,6 +106,9 @@ class MessageReceiveConfig(ConfigBase):
class ExpressionConfig(ConfigBase):
"""表达配置类"""
mode: Literal["llm", "context", "full-context"] = "context"
"""表达方式模式可选llm模式context上下文模式full-context 完整上下文嵌入模式"""
learning_list: list[list] = field(default_factory=lambda: [])
"""
表达学习配置列表,支持按聊天流配置

View File

@@ -1,5 +1,5 @@
[inner]
version = "6.15.0"
version = "6.15.1"
#----以下是给开发人员阅读的,如果你只是部署了麦麦,不需要阅读----
#如果你想要修改配置文件请递增version的值
@@ -47,6 +47,10 @@ private_plan_style = """请你根据聊天内容,用户的最新消息和以下
3.某句话如果已经被回复过,不要重复回复"""
[expression]
# 表达方式模式
mode = "context"
# 可选:llm 模式、context 上下文模式、full-context 完整上下文嵌入模式
# 表达学习配置
learning_list = [ # 表达学习配置列表,支持按聊天流配置
["", "enable", "enable", "1.0"], # 全局配置使用表达启用学习学习强度1.0