Adjust the corresponding calls (调整对应的调用)
@@ -2,13 +2,15 @@ from src.chat.message_receive.chat_stream import get_chat_manager
import time
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config
from src.config.config import model_config
from src.chat.message_receive.message import MessageRecvS4U
from src.mais4u.mais4u_chat.s4u_msg_processor import S4UMessageProcessor
from src.mais4u.mais4u_chat.internal_manager import internal_manager
from src.common.logger import get_logger

logger = get_logger(__name__)


def init_prompt():
    Prompt(
        """
@@ -32,10 +34,8 @@ def init_prompt():
    )

class MaiThinking:
    def __init__(self,chat_id):
    def __init__(self, chat_id):
        self.chat_id = chat_id
        self.chat_stream = get_chat_manager().get_stream(chat_id)
        self.platform = self.chat_stream.platform
@@ -44,11 +44,11 @@ class MaiThinking:
            self.is_group = True
        else:
            self.is_group = False

        self.s4u_message_processor = S4UMessageProcessor()

        self.mind = ""

        self.memory_block = ""
        self.relation_info_block = ""
        self.time_block = ""
@@ -59,17 +59,13 @@ class MaiThinking:
        self.identity = ""
        self.sender = ""
        self.target = ""

        self.thinking_model = LLMRequest(
            model=global_config.model.replyer_1,
            request_type="thinking",
        )
        self.thinking_model = LLMRequest(model_set=model_config.model_task_config.replyer_1, request_type="thinking")

    async def do_think_before_response(self):
        pass

    async def do_think_after_response(self,reponse:str):
    async def do_think_after_response(self, reponse: str):
        prompt = await global_prompt_manager.format_prompt(
            "after_response_think_prompt",
            mind=self.mind,
@@ -85,47 +81,44 @@ class MaiThinking:
            sender=self.sender,
            target=self.target,
        )

        result, _ = await self.thinking_model.generate_response_async(prompt)
        self.mind = result

        logger.info(f"[{self.chat_id}] 思考前想法:{self.mind}")
        # logger.info(f"[{self.chat_id}] 思考前prompt:{prompt}")
        logger.info(f"[{self.chat_id}] 思考后想法:{self.mind}")

        msg_recv = await self.build_internal_message_recv(self.mind)
        await self.s4u_message_processor.process_message(msg_recv)
        internal_manager.set_internal_state(self.mind)

    async def do_think_when_receive_message(self):
        pass

    async def build_internal_message_recv(self,message_text:str):
    async def build_internal_message_recv(self, message_text: str):
        msg_id = f"internal_{time.time()}"

        message_dict = {
            "message_info": {
                "message_id": msg_id,
                "time": time.time(),
                "user_info": {
                    "user_id": "internal", # 内部用户ID
                    "user_nickname": "内心", # 内部昵称
                    "platform": self.platform, # 平台标记为 internal
                    "user_id": "internal", # 内部用户ID
                    "user_nickname": "内心", # 内部昵称
                    "platform": self.platform, # 平台标记为 internal
                    # 其他 user_info 字段按需补充
                },
                "platform": self.platform, # 平台
                "platform": self.platform, # 平台
                # 其他 message_info 字段按需补充
            },
            "message_segment": {
                "type": "text", # 消息类型
                "data": message_text, # 消息内容
                "type": "text", # 消息类型
                "data": message_text, # 消息内容
                # 其他 segment 字段按需补充
            },
            "raw_message": message_text, # 原始消息内容
            "processed_plain_text": message_text, # 处理后的纯文本
            "raw_message": message_text, # 原始消息内容
            "processed_plain_text": message_text, # 处理后的纯文本
            # 下面这些字段可选,根据 MessageRecv 需要
            "is_emoji": False,
            "has_emoji": False,
@@ -139,45 +132,36 @@ class MaiThinking:
            "priority_info": {"message_priority": 10.0}, # 内部消息可设高优先级
            "interest_value": 1.0,
        }
        if self.is_group:
            message_dict["message_info"]["group_info"] = {
                "platform": self.platform,
                "group_id": self.chat_stream.group_info.group_id,
                "group_name": self.chat_stream.group_info.group_name,
            }

        msg_recv = MessageRecvS4U(message_dict)
        msg_recv.chat_info = self.chat_info
        msg_recv.chat_stream = self.chat_stream
        msg_recv.is_internal = True

        return msg_recv


class MaiThinkingManager:
    def __init__(self):
        self.mai_think_list = []

    def get_mai_think(self,chat_id):
    def get_mai_think(self, chat_id):
        for mai_think in self.mai_think_list:
            if mai_think.chat_id == chat_id:
                return mai_think
        mai_think = MaiThinking(chat_id)
        self.mai_think_list.append(mai_think)
        return mai_think


mai_thinking_manager = MaiThinkingManager()


init_prompt()
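
A minimal sketch of the constructor pattern the hunks above migrate to (class and attribute names are taken from the diff; the exact LLMRequest signature is assumed, not verified against the library):

# Sketch only: before vs. after wiring a task model to LLMRequest.
from src.llm_models.utils_model import LLMRequest
from src.config.config import global_config, model_config

# Before: the request was bound to a single model entry from global_config.
old_thinking_model = LLMRequest(
    model=global_config.model.replyer_1,
    request_type="thinking",
)

# After: the request is bound to a task-level model set from model_config.
new_thinking_model = LLMRequest(
    model_set=model_config.model_task_config.replyer_1,
    request_type="thinking",
)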
@@ -1,14 +1,16 @@
import json
import time

from json_repair import repair_json
from src.chat.message_receive.message import MessageRecv
from src.llm_models.utils_model import LLMRequest
from src.common.logger import get_logger
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
from src.config.config import global_config
from src.config.config import global_config, model_config
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.manager.async_task_manager import AsyncTask, async_task_manager
from src.plugin_system.apis import send_api
from json_repair import repair_json

from src.mais4u.s4u_config import s4u_config

logger = get_logger("action")

@@ -32,7 +34,7 @@ BODY_CODE = {
    "帅气的姿势": "010_0190",
    "另一个帅气的姿势": "010_0191",
    "手掌朝前可爱": "010_0210",
    "平静,双手后放":"平静,双手后放",
    "平静,双手后放": "平静,双手后放",
    "思考": "思考",
    "优雅,左手放在腰上": "优雅,左手放在腰上",
    "一般": "一般",
@@ -94,19 +96,15 @@ class ChatAction:
        self.body_action_cooldown: dict[str, int] = {}

        print(s4u_config.models.motion)
        print(global_config.model.emotion)

        self.action_model = LLMRequest(
            model=global_config.model.emotion,
            temperature=0.7,
            request_type="motion",
        )
        print(model_config.model_task_config.emotion)

        self.last_change_time = 0
        self.action_model = LLMRequest(model_set=model_config.model_task_config.emotion, request_type="motion")

        self.last_change_time: float = 0

    async def send_action_update(self):
        """发送动作更新到前端"""

        body_code = BODY_CODE.get(self.body_action, "")
        await send_api.custom_to_stream(
            message_type="body_action",
@@ -115,13 +113,11 @@ class ChatAction:
            storage_message=False,
            show_log=True,
        )

    async def update_action_by_message(self, message: MessageRecv):
        self.regression_count = 0

        message_time = message.message_info.time
        message_time: float = message.message_info.time # type: ignore
        message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
            chat_id=self.chat_id,
            timestamp_start=self.last_change_time,
@@ -147,13 +143,13 @@ class ChatAction:

        prompt_personality = global_config.personality.personality_core
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"

        try:
            # 冷却池处理:过滤掉冷却中的动作
            self._update_body_action_cooldown()
            available_actions = [k for k in BODY_CODE.keys() if k not in self.body_action_cooldown]
            all_actions = "\n".join(available_actions)

            prompt = await global_prompt_manager.format_prompt(
                "change_action_prompt",
                chat_talking_prompt=chat_talking_prompt,
@@ -163,19 +159,18 @@ class ChatAction:
            )

            logger.info(f"prompt: {prompt}")
            response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
            response, (reasoning_content, _, _) = await self.action_model.generate_response_async(
                prompt=prompt, temperature=0.7
            )
            logger.info(f"response: {response}")
            logger.info(f"reasoning_content: {reasoning_content}")

            action_data = json.loads(repair_json(response))

            if action_data:
            if action_data := json.loads(repair_json(response)):
                # 记录原动作,切换后进入冷却
                prev_body_action = self.body_action
                new_body_action = action_data.get("body_action", self.body_action)
                if new_body_action != prev_body_action:
                    if prev_body_action:
                        self.body_action_cooldown[prev_body_action] = 3
                if new_body_action != prev_body_action and prev_body_action:
                    self.body_action_cooldown[prev_body_action] = 3
                self.body_action = new_body_action
                self.head_action = action_data.get("head_action", self.head_action)
                # 发送动作更新
@@ -213,7 +208,6 @@ class ChatAction:
        prompt_personality = global_config.personality.personality_core
        indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
        try:

            # 冷却池处理:过滤掉冷却中的动作
            self._update_body_action_cooldown()
            available_actions = [k for k in BODY_CODE.keys() if k not in self.body_action_cooldown]
@@ -228,17 +222,17 @@ class ChatAction:
            )

            logger.info(f"prompt: {prompt}")
            response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
            response, (reasoning_content, _, _) = await self.action_model.generate_response_async(
                prompt=prompt, temperature=0.7
            )
            logger.info(f"response: {response}")
            logger.info(f"reasoning_content: {reasoning_content}")

            action_data = json.loads(repair_json(response))
            if action_data:
            if action_data := json.loads(repair_json(response)):
                prev_body_action = self.body_action
                new_body_action = action_data.get("body_action", self.body_action)
                if new_body_action != prev_body_action:
                    if prev_body_action:
                        self.body_action_cooldown[prev_body_action] = 6
                if new_body_action != prev_body_action and prev_body_action:
                    self.body_action_cooldown[prev_body_action] = 6
                self.body_action = new_body_action
                # 发送动作更新
                await self.send_action_update()
@@ -306,9 +300,6 @@ class ActionManager:
        return new_action_state


init_prompt()

action_manager = ActionManager()
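
Alongside the constructor change, the call sites in the hunks above now pass the temperature per request and unpack a three-element metadata tuple. A minimal sketch of that calling convention (the surrounding helper is hypothetical; the two discarded tuple slots are not named in the diff and are left as placeholders):

# Sketch only: the migrated per-call pattern, wrapped in a hypothetical helper.
import json
from json_repair import repair_json

async def choose_action(action_model, prompt: str) -> dict:
    # Temperature is supplied per call instead of at LLMRequest construction time.
    response, (reasoning_content, _, _) = await action_model.generate_response_async(
        prompt=prompt, temperature=0.7
    )
    # Repair and parse the model's JSON output, as the hunks above do.
    return json.loads(repair_json(response))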
@@ -137,7 +137,7 @@ class MessageSenderContainer:
                await self.storage.store_message(bot_message, self.chat_stream)

            except Exception as e:
                logger.error(f"[{self.chat_stream.get_stream_name()}] 消息发送或存储时出现错误: {e}", exc_info=True)
                logger.error(f"[消息流: {self.chat_stream.stream_id}] 消息发送或存储时出现错误: {e}", exc_info=True)

            finally:
                # CRUCIAL: Always call task_done() for any item that was successfully retrieved.
@@ -6,7 +6,7 @@ from src.chat.message_receive.message import MessageRecv
from src.llm_models.utils_model import LLMRequest
from src.common.logger import get_logger
from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
from src.config.config import global_config
from src.config.config import global_config, model_config
from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
from src.manager.async_task_manager import AsyncTask, async_task_manager
from src.plugin_system.apis import send_api

@@ -114,18 +114,12 @@ class ChatMood:

        self.regression_count: int = 0

        self.mood_model = LLMRequest(
            model=global_config.model.emotion,
            temperature=0.7,
            request_type="mood_text",
        )
        self.mood_model = LLMRequest(model_set=model_config.model_task_config.emotion, request_type="mood_text")
        self.mood_model_numerical = LLMRequest(
            model=global_config.model.emotion,
            temperature=0.4,
            request_type="mood_numerical",
            model_set=model_config.model_task_config.emotion, request_type="mood_numerical"
        )

        self.last_change_time = 0
        self.last_change_time: float = 0

        # 发送初始情绪状态到ws端
        asyncio.create_task(self.send_emotion_update(self.mood_values))
@@ -164,7 +158,7 @@ class ChatMood:
    async def update_mood_by_message(self, message: MessageRecv):
        self.regression_count = 0

        message_time = message.message_info.time
        message_time: float = message.message_info.time # type: ignore
        message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
            chat_id=self.chat_id,
            timestamp_start=self.last_change_time,
@@ -199,7 +193,9 @@ class ChatMood:
            mood_state=self.mood_state,
        )
        logger.debug(f"text mood prompt: {prompt}")
        response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
        response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
            prompt=prompt, temperature=0.7
        )
        logger.info(f"text mood response: {response}")
        logger.debug(f"text mood reasoning_content: {reasoning_content}")
        return response
@@ -216,8 +212,8 @@ class ChatMood:
            fear=self.mood_values["fear"],
        )
        logger.debug(f"numerical mood prompt: {prompt}")
        response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
            prompt=prompt
        response, (reasoning_content, _, _) = await self.mood_model_numerical.generate_response_async(
            prompt=prompt, temperature=0.4
        )
        logger.info(f"numerical mood response: {response}")
        logger.debug(f"numerical mood reasoning_content: {reasoning_content}")
@@ -276,7 +272,9 @@ class ChatMood:
            mood_state=self.mood_state,
        )
        logger.debug(f"text regress prompt: {prompt}")
        response, (reasoning_content, model_name) = await self.mood_model.generate_response_async(prompt=prompt)
        response, (reasoning_content, _, _) = await self.mood_model.generate_response_async(
            prompt=prompt, temperature=0.7
        )
        logger.info(f"text regress response: {response}")
        logger.debug(f"text regress reasoning_content: {reasoning_content}")
        return response
@@ -293,8 +291,9 @@ class ChatMood:
            fear=self.mood_values["fear"],
        )
        logger.debug(f"numerical regress prompt: {prompt}")
        response, (reasoning_content, model_name) = await self.mood_model_numerical.generate_response_async(
            prompt=prompt
        response, (reasoning_content, _, _) = await self.mood_model_numerical.generate_response_async(
            prompt=prompt,
            temperature=0.4,
        )
        logger.info(f"numerical regress response: {response}")
        logger.debug(f"numerical regress reasoning_content: {reasoning_content}")
@@ -447,6 +446,7 @@ class MoodManager:
        # 发送初始情绪状态到ws端
        asyncio.create_task(new_mood.send_emotion_update(new_mood.mood_values))


if ENABLE_S4U:
    init_prompt()
    mood_manager = MoodManager()
@@ -150,19 +150,18 @@ class PromptBuilder:
        relation_prompt = ""
        if global_config.relationship.enable_relationship and who_chat_in_group:
            relationship_fetcher = relationship_fetcher_manager.get_fetcher(chat_stream.stream_id)

            # 将 (platform, user_id, nickname) 转换为 person_id
            person_ids = []
            for person in who_chat_in_group:
                person_id = PersonInfoManager.get_person_id(person[0], person[1])
                person_ids.append(person_id)

            # 使用 RelationshipFetcher 的 build_relation_info 方法,设置 points_num=3 保持与原来相同的行为
            relation_info_list = await asyncio.gather(
                *[relationship_fetcher.build_relation_info(person_id, points_num=3) for person_id in person_ids]
            )
            relation_info = "".join(relation_info_list)
            if relation_info:
            if relation_info := "".join(relation_info_list):
                relation_prompt = await global_prompt_manager.format_prompt(
                    "relation_prompt", relation_info=relation_info
                )
@@ -186,9 +185,9 @@ class PromptBuilder:
            timestamp=time.time(),
            limit=300,
        )

        talk_type = message.message_info.platform + ":" + str(message.chat_stream.user_info.user_id)

        talk_type = f"{message.message_info.platform}:{str(message.chat_stream.user_info.user_id)}"

        core_dialogue_list = []
        background_dialogue_list = []
@@ -258,19 +257,19 @@ class PromptBuilder:
                all_msg_seg_list.append(msg_seg_str)
            for msg in all_msg_seg_list:
                core_msg_str += msg

        all_dialogue_prompt = get_raw_msg_before_timestamp_with_chat(
            chat_id=chat_stream.stream_id,
            timestamp=time.time(),
            limit=20,
        )
        )
        all_dialogue_prompt_str = build_readable_messages(
            all_dialogue_prompt,
            timestamp_mode="normal_no_YMD",
            show_pic=False,
        )

        return core_msg_str, background_dialogue_prompt,all_dialogue_prompt_str
@@ -1,7 +1,7 @@
import os
from typing import AsyncGenerator
from src.mais4u.openai_client import AsyncOpenAIClient
from src.config.config import global_config
from src.config.config import global_config, model_config
from src.chat.message_receive.message import MessageRecvS4U
from src.mais4u.mais4u_chat.s4u_prompt import prompt_builder
from src.common.logger import get_logger

@@ -14,24 +14,27 @@ logger = get_logger("s4u_stream_generator")
class S4UStreamGenerator:
    def __init__(self):
        replyer_1_config = global_config.model.replyer_1
        provider = replyer_1_config.get("provider")
        if not provider:
            logger.error("`replyer_1` 在配置文件中缺少 `provider` 字段")
            raise ValueError("`replyer_1` 在配置文件中缺少 `provider` 字段")
        replyer_1_config = model_config.model_task_config.replyer_1
        model_to_use = replyer_1_config.model_list[0]
        model_info = model_config.get_model_info(model_to_use)
        if not model_info:
            logger.error(f"模型 {model_to_use} 在配置中未找到")
            raise ValueError(f"模型 {model_to_use} 在配置中未找到")
        provider_name = model_info.api_provider
        provider_info = model_config.get_provider(provider_name)
        if not provider_info:
            logger.error("`replyer_1` 找不到对应的Provider")
            raise ValueError("`replyer_1` 找不到对应的Provider")

        api_key = os.environ.get(f"{provider.upper()}_KEY")
        base_url = os.environ.get(f"{provider.upper()}_BASE_URL")
        api_key = provider_info.api_key
        base_url = provider_info.base_url

        if not api_key:
            logger.error(f"环境变量 {provider.upper()}_KEY 未设置")
            raise ValueError(f"环境变量 {provider.upper()}_KEY 未设置")
            logger.error(f"{provider_name}没有配置API KEY")
            raise ValueError(f"{provider_name}没有配置API KEY")

        self.client_1 = AsyncOpenAIClient(api_key=api_key, base_url=base_url)
        self.model_1_name = replyer_1_config.get("name")
        if not self.model_1_name:
            logger.error("`replyer_1` 在配置文件中缺少 `model_name` 字段")
            raise ValueError("`replyer_1` 在配置文件中缺少 `model_name` 字段")
        self.model_1_name = model_to_use
        self.replyer_1_config = replyer_1_config

        self.current_model_name = "unknown model"
@@ -44,10 +47,10 @@ class S4UStreamGenerator:
            r'[^.。!??!\n\r]+(?:[.。!??!\n\r](?![\'"])|$))', # 匹配直到句子结束符
            re.UNICODE | re.DOTALL,
        )

        self.chat_stream =None

    async def build_last_internal_message(self,message:MessageRecvS4U,previous_reply_context:str = ""):

        self.chat_stream = None

    async def build_last_internal_message(self, message: MessageRecvS4U, previous_reply_context: str = ""):
        # person_id = PersonInfoManager.get_person_id(
        #     message.chat_stream.user_info.platform, message.chat_stream.user_info.user_id
        # )
@@ -71,14 +74,10 @@ class S4UStreamGenerator:
[这是用户发来的新消息, 你需要结合上下文,对此进行回复]:
{message.processed_plain_text}
"""
            return True,message_txt
            return True, message_txt
        else:
            message_txt = message.processed_plain_text
            return False,message_txt

            return False, message_txt

    async def generate_response(
        self, message: MessageRecvS4U, previous_reply_context: str = ""
@@ -88,7 +87,7 @@ class S4UStreamGenerator:
        self.partial_response = ""
        message_txt = message.processed_plain_text
        if not message.is_internal:
            interupted,message_txt_added = await self.build_last_internal_message(message,previous_reply_context)
            interupted, message_txt_added = await self.build_last_internal_message(message, previous_reply_context)
            if interupted:
                message_txt = message_txt_added

@@ -105,7 +104,6 @@ class S4UStreamGenerator:
        current_client = self.client_1
        self.current_model_name = self.model_1_name

        extra_kwargs = {}
        if self.replyer_1_config.get("enable_thinking") is not None:
            extra_kwargs["enable_thinking"] = self.replyer_1_config.get("enable_thinking")
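
S4UStreamGenerator no longer reads provider credentials from environment variables; it resolves them through model_config. A minimal sketch of that resolution chain, using only the accessors that appear in the diff (their exact return types are assumptions):

# Sketch only: resolving a concrete model and its provider credentials.
from src.config.config import model_config

task_config = model_config.model_task_config.replyer_1
model_name = task_config.model_list[0]                 # first model configured for the task
model_info = model_config.get_model_info(model_name)   # falsy when the model is unknown (the diff checks this)
provider_info = model_config.get_provider(model_info.api_provider)
api_key, base_url = provider_info.api_key, provider_info.base_url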
@@ -214,51 +214,49 @@ class SuperChatManager:
    def build_superchat_display_string(self, chat_id: str, max_count: int = 10) -> str:
        """构建SuperChat显示字符串"""
        superchats = self.get_superchats_by_chat(chat_id)

        if not superchats:
            return ""

        # 限制显示数量
        display_superchats = superchats[:max_count]

        lines = []
        lines.append("📢 当前有效超级弹幕:")

        lines = ["📢 当前有效超级弹幕:"]
        for i, sc in enumerate(display_superchats, 1):
            remaining_minutes = int(sc.remaining_time() / 60)
            remaining_seconds = int(sc.remaining_time() % 60)

            time_display = f"{remaining_minutes}分{remaining_seconds}秒" if remaining_minutes > 0 else f"{remaining_seconds}秒"

            line = f"{i}. 【{sc.price}元】{sc.user_nickname}: {sc.message_text}"
            if len(line) > 100: # 限制单行长度
                line = line[:97] + "..."
                line = f"{line[:97]}..."
            line += f" (剩余{time_display})"
            lines.append(line)

        if len(superchats) > max_count:
            lines.append(f"... 还有{len(superchats) - max_count}条SuperChat")

        return "\n".join(lines)
    def build_superchat_summary_string(self, chat_id: str) -> str:
        """构建SuperChat摘要字符串"""
        superchats = self.get_superchats_by_chat(chat_id)

        if not superchats:
            return "当前没有有效的超级弹幕"
        lines = []
        for sc in superchats:
            single_sc_str = f"{sc.user_nickname} - {sc.price}元 - {sc.message_text}"
            if len(single_sc_str) > 100:
                single_sc_str = single_sc_str[:97] + "..."
                single_sc_str = f"{single_sc_str[:97]}..."
            single_sc_str += f" (剩余{int(sc.remaining_time())}秒)"
            lines.append(single_sc_str)

        total_amount = sum(sc.price for sc in superchats)
        count = len(superchats)
        highest_amount = max(sc.price for sc in superchats)

        final_str = f"当前有{count}条超级弹幕,总金额{total_amount}元,最高单笔{highest_amount}元"
        if lines:
            final_str += "\n" + "\n".join(lines)
@@ -287,7 +285,7 @@ class SuperChatManager:
            "lowest_amount": min(amounts)
        }

    async def shutdown(self):
    async def shutdown(self): # sourcery skip: use-contextlib-suppress
        """关闭管理器,清理资源"""
        if self._cleanup_task and not self._cleanup_task.done():
            self._cleanup_task.cancel()
@@ -300,6 +298,7 @@ class SuperChatManager:


# sourcery skip: assign-if-exp
if ENABLE_S4U:
    super_chat_manager = SuperChatManager()
else:
@@ -1,19 +1,14 @@
from src.llm_models.utils_model import LLMRequest
from src.common.logger import get_logger
from src.config.config import global_config
from src.config.config import model_config
from src.plugin_system.apis import send_api

logger = get_logger(__name__)

head_actions_list = [
    "不做额外动作",
    "点头一次",
    "点头两次",
    "摇头",
    "歪脑袋",
    "低头望向一边"
]
head_actions_list = ["不做额外动作", "点头一次", "点头两次", "摇头", "歪脑袋", "低头望向一边"]

async def yes_or_no_head(text: str,emotion: str = "",chat_history: str = "",chat_id: str = ""):

async def yes_or_no_head(text: str, emotion: str = "", chat_history: str = "", chat_id: str = ""):
    prompt = f"""
{chat_history}
以上是对方的发言:
@@ -30,22 +25,14 @@ async def yes_or_no_head(text: str,emotion: str = "",chat_history: str = "",chat
低头望向一边

请从上面的动作中选择一个,并输出,请只输出你选择的动作就好,不要输出其他内容。"""
    model = LLMRequest(
        model=global_config.model.emotion,
        temperature=0.7,
        request_type="motion",
    )

    model = LLMRequest(model_set=model_config.model_task_config.emotion, request_type="motion")

    try:
        # logger.info(f"prompt: {prompt}")
        response, (reasoning_content, model_name) = await model.generate_response_async(prompt=prompt)
        response, _ = await model.generate_response_async(prompt=prompt, temperature=0.7)
        logger.info(f"response: {response}")

        if response in head_actions_list:
            head_action = response
        else:
            head_action = "不做额外动作"

        head_action = response if response in head_actions_list else "不做额外动作"
        await send_api.custom_to_stream(
            message_type="head_action",
            content=head_action,
@@ -53,11 +40,7 @@ async def yes_or_no_head(text: str,emotion: str = "",chat_history: str = "",chat
            storage_message=False,
            show_log=True,
        )

    except Exception as e:
        logger.error(f"yes_or_no_head error: {e}")
        return "不做额外动作"