@@ -1050,17 +764,10 @@ class StatisticOutputTask(AsyncTask):
]
tab_content_list.append(
- _format_stat_data(stat["all_time"], "all_time", datetime.fromtimestamp(local_storage["deploy_time"]))
+ _format_stat_data(stat["all_time"], "all_time", datetime.fromtimestamp(local_storage["deploy_time"])) # type: ignore
)
- # 添加Focus统计内容
- focus_tab = self._generate_focus_tab(stat)
- tab_content_list.append(focus_tab)
-
- # 添加版本对比内容
- versions_tab = self._generate_versions_tab(stat)
- tab_content_list.append(versions_tab)
-
+ # 不再添加版本对比内容
# 添加图表内容
chart_data = self._generate_chart_data(stat)
tab_content_list.append(self._generate_chart_tab(chart_data))
@@ -1211,619 +918,6 @@ class StatisticOutputTask(AsyncTask):
with open(self.record_file_path, "w", encoding="utf-8") as f:
f.write(html_template)
- def _generate_focus_tab(self, stat: dict[str, Any]) -> str:
- """生成Focus统计独立分页的HTML内容"""
-
- # 为每个时间段准备Focus数据
- focus_sections = []
-
- for period_name, period_delta, period_desc in self.stat_period:
- stat_data = stat.get(period_name, {})
-
- if stat_data.get(FOCUS_TOTAL_CYCLES, 0) <= 0:
- continue
-
- # 生成Focus统计数据行
- focus_action_rows = ""
- focus_chat_rows = ""
- focus_stage_rows = ""
- focus_action_stage_rows = ""
-
- # Action类型统计
- total_actions = sum(stat_data[FOCUS_ACTION_RATIOS].values()) if stat_data[FOCUS_ACTION_RATIOS] else 0
- if total_actions > 0:
- focus_action_rows = "\n".join(
- [
- f"
| {action_type} | {count} | {(count / total_actions * 100):.1f}% |
"
- for action_type, count in sorted(stat_data[FOCUS_ACTION_RATIOS].items())
- ]
- )
-
- # 按聊天流统计(横向表格,显示各阶段时间差异和不同action的平均时间)
- focus_chat_rows = ""
- if stat_data[FOCUS_AVG_TIMES_BY_CHAT_ACTION]:
- # 获取前三个阶段(不包括执行动作)
- basic_stages = ["观察", "并行调整动作、处理", "规划器"]
- existing_basic_stages = []
- for stage in basic_stages:
- # 检查是否有任何聊天流在这个阶段有数据
- stage_exists = False
- for _chat_id, stage_times in stat_data[FOCUS_AVG_TIMES_BY_CHAT_ACTION].items():
- if stage in stage_times:
- stage_exists = True
- break
- if stage_exists:
- existing_basic_stages.append(stage)
-
- # 获取所有action类型(按出现频率排序)
- all_action_types = sorted(
- stat_data[FOCUS_ACTION_RATIOS].keys(), key=lambda x: stat_data[FOCUS_ACTION_RATIOS][x], reverse=True
- )
-
- # 为每个聊天流生成一行
- chat_rows = []
- for chat_id in sorted(
- stat_data[FOCUS_CYCLE_CNT_BY_CHAT].keys(),
- key=lambda x: stat_data[FOCUS_CYCLE_CNT_BY_CHAT][x],
- reverse=True,
- ):
- chat_name = self.name_mapping.get(chat_id, (chat_id, 0))[0]
- cycle_count = stat_data[FOCUS_CYCLE_CNT_BY_CHAT][chat_id]
-
- # 获取该聊天流的各阶段平均时间
- stage_times = stat_data[FOCUS_AVG_TIMES_BY_CHAT_ACTION].get(chat_id, {})
-
- row_cells = [f"
{chat_name} ({cycle_count}次循环) | "]
-
- # 添加基础阶段时间
- for stage in existing_basic_stages:
- time_val = stage_times.get(stage, 0.0)
- row_cells.append(f"
{time_val:.3f}秒 | ")
-
- # 添加每个action类型的平均执行时间
- for action_type in all_action_types:
- # 使用真实的按聊天流+action类型分组的执行时间数据
- exec_times_by_chat_action = stat_data.get("focus_exec_times_by_chat_action", {})
- chat_action_times = exec_times_by_chat_action.get(chat_id, {})
- avg_exec_time = chat_action_times.get(action_type, 0.0)
-
- if avg_exec_time > 0:
- row_cells.append(f"
{avg_exec_time:.3f}秒 | ")
- else:
- row_cells.append("
- | ")
-
- chat_rows.append(f"
{''.join(row_cells)}
")
-
- # 生成表头
- stage_headers = "".join([f"
{stage} | " for stage in existing_basic_stages])
- action_headers = "".join(
- [f"
{action_type} (执行) | " for action_type in all_action_types]
- )
- focus_chat_table_header = f"
| 聊天流 | {stage_headers}{action_headers}
|---|
"
- focus_chat_rows = focus_chat_table_header + "\n" + "\n".join(chat_rows)
-
- # 全局阶段时间统计
- focus_stage_rows = "\n".join(
- [
- f"
| {stage} | {avg_time:.3f}秒 |
"
- for stage, avg_time in sorted(stat_data[FOCUS_AVG_TIMES_BY_STAGE].items())
- ]
- )
-
- # 聊天流Action选择比例对比表(横向表格)
- focus_chat_action_ratios_rows = ""
- if stat_data.get("focus_action_ratios_by_chat"):
- # 获取所有action类型(按全局频率排序)
- all_action_types_for_ratio = sorted(
- stat_data[FOCUS_ACTION_RATIOS].keys(), key=lambda x: stat_data[FOCUS_ACTION_RATIOS][x], reverse=True
- )
-
- if all_action_types_for_ratio:
- # 为每个聊天流生成数据行(按循环数排序)
- chat_ratio_rows = []
- for chat_id in sorted(
- stat_data[FOCUS_CYCLE_CNT_BY_CHAT].keys(),
- key=lambda x: stat_data[FOCUS_CYCLE_CNT_BY_CHAT][x],
- reverse=True,
- ):
- chat_name = self.name_mapping.get(chat_id, (chat_id, 0))[0]
- total_cycles = stat_data[FOCUS_CYCLE_CNT_BY_CHAT][chat_id]
- chat_action_counts = stat_data["focus_action_ratios_by_chat"].get(chat_id, {})
-
- row_cells = [f"
{chat_name} ({total_cycles}次循环) | "]
-
- # 添加每个action类型的数量和百分比
- for action_type in all_action_types_for_ratio:
- count = chat_action_counts.get(action_type, 0)
- ratio = (count / total_cycles * 100) if total_cycles > 0 else 0
- if count > 0:
- row_cells.append(f"
{count} ({ratio:.1f}%) | ")
- else:
- row_cells.append("
- (0%) | ")
-
- chat_ratio_rows.append(f"
{''.join(row_cells)}
")
-
- # 生成表头
- action_headers = "".join([f"
{action_type} | " for action_type in all_action_types_for_ratio])
- chat_action_ratio_table_header = f"
| 聊天流 | {action_headers}
|---|
"
- focus_chat_action_ratios_rows = chat_action_ratio_table_header + "\n" + "\n".join(chat_ratio_rows)
-
- # 按Action类型的阶段时间统计(横向表格)
- focus_action_stage_rows = ""
- if stat_data[FOCUS_AVG_TIMES_BY_ACTION]:
- # 获取所有阶段(按固定顺序)
- stage_order = ["观察", "并行调整动作、处理", "规划器", "执行动作"]
- all_stages = []
- for stage in stage_order:
- if any(stage in stage_times for stage_times in stat_data[FOCUS_AVG_TIMES_BY_ACTION].values()):
- all_stages.append(stage)
-
- # 为每个Action类型生成一行
- action_rows = []
- for action_type in sorted(stat_data[FOCUS_AVG_TIMES_BY_ACTION].keys()):
- stage_times = stat_data[FOCUS_AVG_TIMES_BY_ACTION][action_type]
- row_cells = [f"
{action_type} | "]
-
- for stage in all_stages:
- time_val = stage_times.get(stage, 0.0)
- row_cells.append(f"
{time_val:.3f}秒 | ")
-
- action_rows.append(f"
{''.join(row_cells)}
")
-
- # 生成表头
- stage_headers = "".join([f"
{stage} | " for stage in all_stages])
- focus_action_stage_table_header = f"
| Action类型 | {stage_headers}
|---|
"
- focus_action_stage_rows = focus_action_stage_table_header + "\n" + "\n".join(action_rows)
-
- # 计算时间范围
- if period_name == "all_time":
- from src.manager.local_store_manager import local_storage
-
- start_time = datetime.fromtimestamp(local_storage["deploy_time"])
- time_range = (
- f"{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
- )
- else:
- start_time = datetime.now() - period_delta
- time_range = (
- f"{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
- )
-
- # 生成该时间段的Focus统计HTML
- section_html = f"""
-
-
{period_desc}Focus统计
-
统计时段: {time_range}
-
总循环数: {stat_data.get(FOCUS_TOTAL_CYCLES, 0)}
-
-
-
-
全局阶段平均时间
-
- | 阶段 | 平均时间 |
- {focus_stage_rows}
-
-
-
-
-
Action类型分布
-
- | Action类型 | 次数 | 占比 |
- {focus_action_rows}
-
-
-
-
-
-
-
-
聊天流Action选择比例对比
-
-
- {focus_chat_action_ratios_rows}
-
-
-
-
-
Action类型阶段时间详情
-
-
- {focus_action_stage_rows}
-
-
-
- """
-
- focus_sections.append(section_html)
-
- # 如果没有任何Focus数据
- if not focus_sections:
- focus_sections.append("""
-
-
暂无Focus统计数据
-
在指定时间段内未找到任何Focus循环数据。
-
请确保 log/hfc_loop/ 目录下存在相应的JSON文件。
-
- """)
-
- return f"""
-
-
Focus系统详细统计
-
- 数据来源: log/hfc_loop/ 目录下的JSON文件
- 统计内容: 各时间段的Focus循环性能分析
-
-
- {"".join(focus_sections)}
-
-
-
- """
-
- def _generate_versions_tab(self, stat: dict[str, Any]) -> str:
- """生成版本对比独立分页的HTML内容"""
-
- # 为每个时间段准备版本对比数据
- version_sections = []
-
- for period_name, period_delta, period_desc in self.stat_period:
- stat_data = stat.get(period_name, {})
-
- if not stat_data.get(FOCUS_CYCLE_CNT_BY_VERSION):
- continue
-
- # 获取所有版本(按循环数排序)
- all_versions = sorted(
- stat_data[FOCUS_CYCLE_CNT_BY_VERSION].keys(),
- key=lambda x: stat_data[FOCUS_CYCLE_CNT_BY_VERSION][x],
- reverse=True,
- )
-
- # 生成版本Action分布表
- focus_version_action_rows = ""
- if stat_data[FOCUS_ACTION_RATIOS_BY_VERSION]:
- # 获取所有action类型
- all_action_types_for_version = set()
- for version_actions in stat_data[FOCUS_ACTION_RATIOS_BY_VERSION].values():
- all_action_types_for_version.update(version_actions.keys())
- all_action_types_for_version = sorted(all_action_types_for_version)
-
- if all_action_types_for_version:
- version_action_rows = []
- for version in all_versions:
- version_actions = stat_data[FOCUS_ACTION_RATIOS_BY_VERSION].get(version, {})
- total_cycles = stat_data[FOCUS_CYCLE_CNT_BY_VERSION][version]
-
- row_cells = [f"
{version} ({total_cycles}次循环) | "]
-
- for action_type in all_action_types_for_version:
- count = version_actions.get(action_type, 0)
- ratio = (count / total_cycles * 100) if total_cycles > 0 else 0
- row_cells.append(f"
{count} ({ratio:.1f}%) | ")
-
- version_action_rows.append(f"
{''.join(row_cells)}
")
-
- # 生成表头
- action_headers = "".join(
- [f"
{action_type} | " for action_type in all_action_types_for_version]
- )
- version_action_table_header = f"
| 版本 | {action_headers}
|---|
"
- focus_version_action_rows = version_action_table_header + "\n" + "\n".join(version_action_rows)
-
- # 生成版本阶段时间表(按action类型分解执行时间)
- focus_version_stage_rows = ""
- if stat_data[FOCUS_AVG_TIMES_BY_VERSION]:
- # 基础三个阶段
- basic_stages = ["观察", "并行调整动作、处理", "规划器"]
-
- # 获取所有action类型用于执行时间列
- all_action_types_for_exec = set()
- if stat_data.get("focus_exec_times_by_version_action"):
- for version_actions in stat_data["focus_exec_times_by_version_action"].values():
- all_action_types_for_exec.update(version_actions.keys())
- all_action_types_for_exec = sorted(all_action_types_for_exec)
-
- # 检查哪些基础阶段存在数据
- existing_basic_stages = []
- for stage in basic_stages:
- stage_exists = False
- for version_stages in stat_data[FOCUS_AVG_TIMES_BY_VERSION].values():
- if stage in version_stages:
- stage_exists = True
- break
- if stage_exists:
- existing_basic_stages.append(stage)
-
- # 构建表格
- if existing_basic_stages or all_action_types_for_exec:
- version_stage_rows = []
-
- # 为每个版本生成数据行
- for version in all_versions:
- version_stages = stat_data[FOCUS_AVG_TIMES_BY_VERSION].get(version, {})
- total_cycles = stat_data[FOCUS_CYCLE_CNT_BY_VERSION][version]
-
- row_cells = [f"
{version} ({total_cycles}次循环) | "]
-
- # 添加基础阶段时间
- for stage in existing_basic_stages:
- time_val = version_stages.get(stage, 0.0)
- row_cells.append(f"
{time_val:.3f}秒 | ")
-
- # 添加不同action类型的执行时间
- for action_type in all_action_types_for_exec:
- # 获取该版本该action类型的平均执行时间
- version_exec_times = stat_data.get("focus_exec_times_by_version_action", {})
- if version in version_exec_times and action_type in version_exec_times[version]:
- exec_time = version_exec_times[version][action_type]
- row_cells.append(f"
{exec_time:.3f}秒 | ")
- else:
- row_cells.append("
- | ")
-
- version_stage_rows.append(f"
{''.join(row_cells)}
")
-
- # 生成表头
- basic_headers = "".join([f"
{stage} | " for stage in existing_basic_stages])
- action_headers = "".join(
- [
- f"
执行时间 [{action_type}] | "
- for action_type in all_action_types_for_exec
- ]
- )
- version_stage_table_header = f"
| 版本 | {basic_headers}{action_headers}
|---|
"
- focus_version_stage_rows = version_stage_table_header + "\n" + "\n".join(version_stage_rows)
-
- # 计算时间范围
- if period_name == "all_time":
- from src.manager.local_store_manager import local_storage
-
- start_time = datetime.fromtimestamp(local_storage["deploy_time"])
- time_range = (
- f"{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
- )
- else:
- start_time = datetime.now() - period_delta
- time_range = (
- f"{start_time.strftime('%Y-%m-%d %H:%M:%S')} ~ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
- )
-
- # 生成该时间段的版本对比HTML
- section_html = f"""
-
-
{period_desc}版本对比
-
统计时段: {time_range}
-
包含版本: {len(all_versions)} 个版本
-
-
-
-
版本Action类型分布对比
-
-
- {focus_version_action_rows}
-
-
-
-
-
版本阶段时间对比
-
-
- {focus_version_stage_rows}
-
-
-
-
- """
-
- version_sections.append(section_html)
-
- # 如果没有任何版本数据
- if not version_sections:
- version_sections.append("""
-
-
暂无版本对比数据
-
在指定时间段内未找到任何版本信息。
-
请确保 log/hfc_loop/ 目录下的JSON文件包含版本信息。
-
- """)
-
- return f"""
-
-
Focus HFC版本对比分析
-
- 对比内容: 不同版本的Action类型分布和各阶段性能表现
- 数据来源: log/hfc_loop/ 目录下JSON文件中的version字段
-
-
- {"".join(version_sections)}
-
-
-
- """
-
def _generate_chart_data(self, stat: dict[str, Any]) -> dict:
"""生成图表数据"""
now = datetime.now()
@@ -1865,7 +959,7 @@ class StatisticOutputTask(AsyncTask):
# 查询LLM使用记录
query_start_time = start_time
- for record in LLMUsage.select().where(LLMUsage.timestamp >= query_start_time):
+ for record in LLMUsage.select().where(LLMUsage.timestamp >= query_start_time): # type: ignore
record_time = record.timestamp
# 找到对应的时间间隔索引
@@ -1875,7 +969,7 @@ class StatisticOutputTask(AsyncTask):
if 0 <= interval_index < len(time_points):
# 累加总花费数据
cost = record.cost or 0.0
- total_cost_data[interval_index] += cost
+ total_cost_data[interval_index] += cost # type: ignore
# 累加按模型分类的花费
model_name = record.model_name or "unknown"
@@ -1892,7 +986,7 @@ class StatisticOutputTask(AsyncTask):
# 查询消息记录
query_start_timestamp = start_time.timestamp()
- for message in Messages.select().where(Messages.time >= query_start_timestamp):
+ for message in Messages.select().where(Messages.time >= query_start_timestamp): # type: ignore
message_time_ts = message.time
# 找到对应的时间间隔索引
@@ -1917,71 +1011,16 @@ class StatisticOutputTask(AsyncTask):
message_by_chat[chat_name] = [0] * len(time_points)
message_by_chat[chat_name][interval_index] += 1
- # 查询Focus循环记录
- focus_cycles_by_action = {}
- focus_time_by_stage = {}
-
- log_dir = "log/hfc_loop"
- if os.path.exists(log_dir):
- json_files = glob.glob(os.path.join(log_dir, "*.json"))
- for json_file in json_files:
- try:
- # 解析文件时间
- filename = os.path.basename(json_file)
- name_parts = filename.replace(".json", "").split("_")
- if len(name_parts) >= 4:
- date_str = name_parts[-2]
- time_str = name_parts[-1]
- file_time_str = f"{date_str}_{time_str}"
- file_time = datetime.strptime(file_time_str, "%Y%m%d_%H%M%S")
-
- if file_time >= start_time:
- with open(json_file, "r", encoding="utf-8") as f:
- cycles_data = json.load(f)
-
- for cycle in cycles_data:
- try:
- timestamp_str = cycle.get("timestamp", "")
- if timestamp_str:
- cycle_time = datetime.fromisoformat(timestamp_str.replace("Z", "+00:00"))
- else:
- cycle_time = file_time
-
- if cycle_time >= start_time:
- # 计算时间间隔索引
- time_diff = (cycle_time - start_time).total_seconds()
- interval_index = int(time_diff // interval_seconds)
-
- if 0 <= interval_index < len(time_points):
- action_type = cycle.get("action_type", "unknown")
- step_times = cycle.get("step_times", {})
-
- # 累计action类型数据
- if action_type not in focus_cycles_by_action:
- focus_cycles_by_action[action_type] = [0] * len(time_points)
- focus_cycles_by_action[action_type][interval_index] += 1
-
- # 累计阶段时间数据
- for stage, time_val in step_times.items():
- if stage not in focus_time_by_stage:
- focus_time_by_stage[stage] = [0] * len(time_points)
- focus_time_by_stage[stage][interval_index] += time_val
- except Exception:
- continue
- except Exception:
- continue
-
return {
"time_labels": time_labels,
"total_cost_data": total_cost_data,
"cost_by_model": cost_by_model,
"cost_by_module": cost_by_module,
"message_by_chat": message_by_chat,
- "focus_cycles_by_action": focus_cycles_by_action,
- "focus_time_by_stage": focus_time_by_stage,
}
def _generate_chart_tab(self, chart_data: dict) -> str:
+ # sourcery skip: extract-duplicate-method, move-assign-in-block
"""生成图表选项卡HTML内容"""
# 生成不同颜色的调色板
@@ -2069,14 +1108,8 @@ class StatisticOutputTask(AsyncTask):
-
-
-
-
-
-
-
+
@@ -2179,8 +1212,6 @@ class StatisticOutputTask(AsyncTask):
createChart('costByModule', data, timeRange);
createChart('costByModel', data, timeRange);
createChart('messageByChat', data, timeRange);
- createChart('focusCyclesByAction', data, timeRange);
- createChart('focusTimeByStage', data, timeRange);
}}
function createChart(chartType, data, timeRange) {{
@@ -2293,7 +1324,7 @@ class AsyncStatisticOutputTask(AsyncTask):
# 数据收集任务
collect_task = asyncio.create_task(
- loop.run_in_executor(executor, self._collect_all_statistics, now)
+ loop.run_in_executor(executor, self._collect_all_statistics, now) # type: ignore
)
stats = await collect_task
@@ -2301,8 +1332,8 @@ class AsyncStatisticOutputTask(AsyncTask):
# 创建并发的输出任务
output_tasks = [
- asyncio.create_task(loop.run_in_executor(executor, self._statistic_console_output, stats, now)),
- asyncio.create_task(loop.run_in_executor(executor, self._generate_html_report, stats, now)),
+ asyncio.create_task(loop.run_in_executor(executor, self._statistic_console_output, stats, now)), # type: ignore
+ asyncio.create_task(loop.run_in_executor(executor, self._generate_html_report, stats, now)), # type: ignore
]
# 等待所有输出任务完成
@@ -2317,13 +1348,13 @@ class AsyncStatisticOutputTask(AsyncTask):
# 复用 StatisticOutputTask 的所有方法
def _collect_all_statistics(self, now: datetime):
- return StatisticOutputTask._collect_all_statistics(self, now)
+ return StatisticOutputTask._collect_all_statistics(self, now) # type: ignore
def _statistic_console_output(self, stats: Dict[str, Any], now: datetime):
- return StatisticOutputTask._statistic_console_output(self, stats, now)
+ return StatisticOutputTask._statistic_console_output(self, stats, now) # type: ignore
def _generate_html_report(self, stats: dict[str, Any], now: datetime):
- return StatisticOutputTask._generate_html_report(self, stats, now)
+ return StatisticOutputTask._generate_html_report(self, stats, now) # type: ignore
# 其他需要的方法也可以类似复用...
@staticmethod
@@ -2335,22 +1366,7 @@ class AsyncStatisticOutputTask(AsyncTask):
return StatisticOutputTask._collect_online_time_for_period(collect_period, now)
def _collect_message_count_for_period(self, collect_period: List[Tuple[str, datetime]]) -> Dict[str, Any]:
- return StatisticOutputTask._collect_message_count_for_period(self, collect_period)
-
- def _collect_focus_statistics_for_period(self, collect_period: List[Tuple[str, datetime]]) -> Dict[str, Any]:
- return StatisticOutputTask._collect_focus_statistics_for_period(self, collect_period)
-
- def _process_focus_file_data(
- self,
- cycles_data: List[Dict],
- stats: Dict[str, Any],
- collect_period: List[Tuple[str, datetime]],
- file_time: datetime,
- ):
- return StatisticOutputTask._process_focus_file_data(self, cycles_data, stats, collect_period, file_time)
-
- def _calculate_focus_averages(self, stats: Dict[str, Any]):
- return StatisticOutputTask._calculate_focus_averages(self, stats)
+ return StatisticOutputTask._collect_message_count_for_period(self, collect_period) # type: ignore
@staticmethod
def _format_total_stat(stats: Dict[str, Any]) -> str:
@@ -2361,28 +1377,19 @@ class AsyncStatisticOutputTask(AsyncTask):
return StatisticOutputTask._format_model_classified_stat(stats)
def _format_chat_stat(self, stats: Dict[str, Any]) -> str:
- return StatisticOutputTask._format_chat_stat(self, stats)
-
- def _format_focus_stat(self, stats: Dict[str, Any]) -> str:
- return StatisticOutputTask._format_focus_stat(self, stats)
+ return StatisticOutputTask._format_chat_stat(self, stats) # type: ignore
def _generate_chart_data(self, stat: dict[str, Any]) -> dict:
- return StatisticOutputTask._generate_chart_data(self, stat)
+ return StatisticOutputTask._generate_chart_data(self, stat) # type: ignore
def _collect_interval_data(self, now: datetime, hours: int, interval_minutes: int) -> dict:
- return StatisticOutputTask._collect_interval_data(self, now, hours, interval_minutes)
+ return StatisticOutputTask._collect_interval_data(self, now, hours, interval_minutes) # type: ignore
def _generate_chart_tab(self, chart_data: dict) -> str:
- return StatisticOutputTask._generate_chart_tab(self, chart_data)
+ return StatisticOutputTask._generate_chart_tab(self, chart_data) # type: ignore
def _get_chat_display_name_from_id(self, chat_id: str) -> str:
- return StatisticOutputTask._get_chat_display_name_from_id(self, chat_id)
-
- def _generate_focus_tab(self, stat: dict[str, Any]) -> str:
- return StatisticOutputTask._generate_focus_tab(self, stat)
-
- def _generate_versions_tab(self, stat: dict[str, Any]) -> str:
- return StatisticOutputTask._generate_versions_tab(self, stat)
+ return StatisticOutputTask._get_chat_display_name_from_id(self, chat_id) # type: ignore
def _convert_defaultdict_to_dict(self, data):
- return StatisticOutputTask._convert_defaultdict_to_dict(self, data)
+ return StatisticOutputTask._convert_defaultdict_to_dict(self, data) # type: ignore
diff --git a/src/chat/utils/timer_calculator.py b/src/chat/utils/timer_calculator.py
index df2b9f77..d9479af1 100644
--- a/src/chat/utils/timer_calculator.py
+++ b/src/chat/utils/timer_calculator.py
@@ -1,7 +1,8 @@
+import asyncio
+
from time import perf_counter
from functools import wraps
from typing import Optional, Dict, Callable
-import asyncio
from rich.traceback import install
install(extra_lines=3)
@@ -88,10 +89,10 @@ class Timer:
self.name = name
self.storage = storage
- self.elapsed = None
+ self.elapsed: float = None # type: ignore
self.auto_unit = auto_unit
- self.start = None
+ self.start: float = None # type: ignore
@staticmethod
def _validate_types(name, storage):
@@ -120,7 +121,7 @@ class Timer:
return None
wrapper = async_wrapper if asyncio.iscoroutinefunction(func) else sync_wrapper
- wrapper.__timer__ = self # 保留计时器引用
+ wrapper.__timer__ = self # 保留计时器引用 # type: ignore
return wrapper
def __enter__(self):
diff --git a/src/chat/utils/typo_generator.py b/src/chat/utils/typo_generator.py
index 24d65057..4de21946 100644
--- a/src/chat/utils/typo_generator.py
+++ b/src/chat/utils/typo_generator.py
@@ -7,10 +7,10 @@ import math
import os
import random
import time
+import jieba
+
from collections import defaultdict
from pathlib import Path
-
-import jieba
from pypinyin import Style, pinyin
from src.common.logger import get_logger
@@ -104,7 +104,7 @@ class ChineseTypoGenerator:
try:
return "\u4e00" <= char <= "\u9fff"
except Exception as e:
- logger.debug(e)
+ logger.debug(str(e))
return False
def _get_pinyin(self, sentence):
@@ -138,7 +138,7 @@ class ChineseTypoGenerator:
# 如果最后一个字符不是数字,说明可能是轻声或其他特殊情况
if not py[-1].isdigit():
# 为非数字结尾的拼音添加数字声调1
- return py + "1"
+ return f"{py}1"
base = py[:-1] # 去掉声调
tone = int(py[-1]) # 获取声调
@@ -363,7 +363,7 @@ class ChineseTypoGenerator:
else:
# 处理多字词的单字替换
word_result = []
- for _, (char, py) in enumerate(zip(word, word_pinyin)):
+ for _, (char, py) in enumerate(zip(word, word_pinyin, strict=False)):
# 词中的字替换概率降低
word_error_rate = self.error_rate * (0.7 ** (len(word) - 1))
diff --git a/src/chat/utils/utils.py b/src/chat/utils/utils.py
index a147846c..e7d2cadd 100644
--- a/src/chat/utils/utils.py
+++ b/src/chat/utils/utils.py
@@ -1,19 +1,22 @@
import random
import re
+import string
import time
-from collections import Counter
-
import jieba
import numpy as np
+
+from collections import Counter
from maim_message import UserInfo
+from typing import Optional, Tuple, Dict, List, Any
from src.common.logger import get_logger
-from src.manager.mood_manager import mood_manager
-from ..message_receive.message import MessageRecv
+from src.common.message_repository import find_messages, count_messages
+from src.config.config import global_config
+from src.chat.message_receive.message import MessageRecv
+from src.chat.message_receive.chat_stream import get_chat_manager
from src.llm_models.utils_model import LLMRequest
+from src.person_info.person_info import PersonInfoManager, get_person_info_manager
from .typo_generator import ChineseTypoGenerator
-from ...config.config import global_config
-from ...common.message_repository import find_messages, count_messages
logger = get_logger("chat_utils")
@@ -27,11 +30,7 @@ def db_message_to_str(message_dict: dict) -> str:
logger.debug(f"message_dict: {message_dict}")
time_str = time.strftime("%m-%d %H:%M:%S", time.localtime(message_dict["time"]))
try:
- name = "[(%s)%s]%s" % (
- message_dict["user_id"],
- message_dict.get("user_nickname", ""),
- message_dict.get("user_cardname", ""),
- )
+ name = f"[({message_dict['user_id']}){message_dict.get('user_nickname', '')}]{message_dict.get('user_cardname', '')}"
except Exception:
name = message_dict.get("user_nickname", "") or f"用户{message_dict['user_id']}"
content = message_dict.get("processed_plain_text", "")
@@ -47,17 +46,18 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
reply_probability = 0.0
is_at = False
is_mentioned = False
-
+ if message.is_mentioned is not None:
+ return bool(message.is_mentioned), message.is_mentioned
if (
message.message_info.additional_config is not None
and message.message_info.additional_config.get("is_mentioned") is not None
):
try:
- reply_probability = float(message.message_info.additional_config.get("is_mentioned"))
+ reply_probability = float(message.message_info.additional_config.get("is_mentioned")) # type: ignore
is_mentioned = True
return is_mentioned, reply_probability
except Exception as e:
- logger.warning(e)
+ logger.warning(str(e))
logger.warning(
f"消息中包含不合理的设置 is_mentioned: {message.message_info.additional_config.get('is_mentioned')}"
)
@@ -80,7 +80,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
if is_at and global_config.normal_chat.at_bot_inevitable_reply:
reply_probability = 1.0
- logger.info("被@,回复概率设置为100%")
+ logger.debug("被@,回复概率设置为100%")
else:
if not is_mentioned:
# 判断是否被回复
@@ -105,7 +105,7 @@ def is_mentioned_bot_in_message(message: MessageRecv) -> tuple[bool, float]:
is_mentioned = True
if is_mentioned and global_config.normal_chat.mentioned_bot_inevitable_reply:
reply_probability = 1.0
- logger.info("被提及,回复概率设置为100%")
+ logger.debug("被提及,回复概率设置为100%")
return is_mentioned, reply_probability
@@ -122,30 +122,6 @@ async def get_embedding(text, request_type="embedding"):
return embedding
-def get_recent_group_detailed_plain_text(chat_stream_id: str, limit: int = 12, combine=False):
- filter_query = {"chat_id": chat_stream_id}
- sort_order = [("time", -1)]
- recent_messages = find_messages(message_filter=filter_query, sort=sort_order, limit=limit)
-
- if not recent_messages:
- return []
-
- message_detailed_plain_text = ""
- message_detailed_plain_text_list = []
-
- # 反转消息列表,使最新的消息在最后
- recent_messages.reverse()
-
- if combine:
- for msg_db_data in recent_messages:
- message_detailed_plain_text += str(msg_db_data["detailed_plain_text"])
- return message_detailed_plain_text
- else:
- for msg_db_data in recent_messages:
- message_detailed_plain_text_list.append(msg_db_data["detailed_plain_text"])
- return message_detailed_plain_text_list
-
-
def get_recent_group_speaker(chat_stream_id: str, sender, limit: int = 12) -> list:
# 获取当前群聊记录内发言的人
filter_query = {"chat_id": chat_stream_id}
@@ -199,10 +175,7 @@ def split_into_sentences_w_remove_punctuation(text: str) -> list[str]:
len_text = len(text)
if len_text < 3:
- if random.random() < 0.01:
- return list(text) # 如果文本很短且触发随机条件,直接按字符分割
- else:
- return [text]
+ return list(text) if random.random() < 0.01 else [text]
# 定义分隔符
separators = {",", ",", " ", "。", ";"}
@@ -312,7 +285,7 @@ def random_remove_punctuation(text: str) -> str:
continue
elif char == ",":
rand = random.random()
- if rand < 0.25: # 5%概率删除逗号
+ if rand < 0.05: # 5%概率删除逗号
continue
elif rand < 0.25: # 20%概率把逗号变成空格
result += " "
@@ -347,10 +320,9 @@ def process_llm_response(text: str, enable_splitter: bool = True, enable_chinese
max_length = global_config.response_splitter.max_length * 2
max_sentence_num = global_config.response_splitter.max_sentence_num
# 如果基本上是中文,则进行长度过滤
- if get_western_ratio(cleaned_text) < 0.1:
- if len(cleaned_text) > max_length:
- logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
- return ["懒得说"]
+ if get_western_ratio(cleaned_text) < 0.1 and len(cleaned_text) > max_length:
+ logger.warning(f"回复过长 ({len(cleaned_text)} 字符),返回默认回复")
+ return ["懒得说"]
typo_generator = ChineseTypoGenerator(
error_rate=global_config.chinese_typo.error_rate,
@@ -408,14 +380,14 @@ def calculate_typing_time(
- 在所有输入结束后,额外加上回车时间0.3秒
- 如果is_emoji为True,将使用固定1秒的输入时间
"""
- # 将0-1的唤醒度映射到-1到1
- mood_arousal = mood_manager.current_mood.arousal
- # 映射到0.5到2倍的速度系数
- typing_speed_multiplier = 1.5**mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
- chinese_time *= 1 / typing_speed_multiplier
- english_time *= 1 / typing_speed_multiplier
+ # # 将0-1的唤醒度映射到-1到1
+ # mood_arousal = mood_manager.current_mood.arousal
+ # # 映射到0.5到2倍的速度系数
+ # typing_speed_multiplier = 1.5**mood_arousal # 唤醒度为1时速度翻倍,为-1时速度减半
+ # chinese_time *= 1 / typing_speed_multiplier
+ # english_time *= 1 / typing_speed_multiplier
# 计算中文字符数
- chinese_chars = sum(1 for char in input_string if "\u4e00" <= char <= "\u9fff")
+ chinese_chars = sum("\u4e00" <= char <= "\u9fff" for char in input_string)
# 如果只有一个中文字符,使用3倍时间
if chinese_chars == 1 and len(input_string.strip()) == 1:
@@ -424,11 +396,7 @@ def calculate_typing_time(
# 正常计算所有字符的输入时间
total_time = 0.0
for char in input_string:
- if "\u4e00" <= char <= "\u9fff": # 判断是否为中文字符
- total_time += chinese_time
- else: # 其他字符(如英文)
- total_time += english_time
-
+ total_time += chinese_time if "\u4e00" <= char <= "\u9fff" else english_time
if is_emoji:
total_time = 1
@@ -448,18 +416,14 @@ def cosine_similarity(v1, v2):
dot_product = np.dot(v1, v2)
norm1 = np.linalg.norm(v1)
norm2 = np.linalg.norm(v2)
- if norm1 == 0 or norm2 == 0:
- return 0
- return dot_product / (norm1 * norm2)
+ return 0 if norm1 == 0 or norm2 == 0 else dot_product / (norm1 * norm2)
def text_to_vector(text):
"""将文本转换为词频向量"""
# 分词
words = jieba.lcut(text)
- # 统计词频
- word_freq = Counter(words)
- return word_freq
+ return Counter(words)
def find_similar_topics_simple(text: str, topics: list, top_k: int = 5) -> list:
@@ -486,9 +450,7 @@ def find_similar_topics_simple(text: str, topics: list, top_k: int = 5) -> list:
def truncate_message(message: str, max_length=20) -> str:
"""截断消息,使其不超过指定长度"""
- if len(message) > max_length:
- return message[:max_length] + "..."
- return message
+ return f"{message[:max_length]}..." if len(message) > max_length else message
def protect_kaomoji(sentence):
@@ -517,7 +479,7 @@ def protect_kaomoji(sentence):
placeholder_to_kaomoji = {}
for idx, match in enumerate(kaomoji_matches):
- kaomoji = match[0] if match[0] else match[1]
+ kaomoji = match[0] or match[1]
placeholder = f"__KAOMOJI_{idx}__"
sentence = sentence.replace(kaomoji, placeholder, 1)
placeholder_to_kaomoji[placeholder] = kaomoji
@@ -558,7 +520,7 @@ def get_western_ratio(paragraph):
if not alnum_chars:
return 0.0
- western_count = sum(1 for char in alnum_chars if is_english_letter(char))
+ western_count = sum(bool(is_english_letter(char)) for char in alnum_chars)
return western_count / len(alnum_chars)
@@ -605,6 +567,7 @@ def count_messages_between(start_time: float, end_time: float, stream_id: str) -
def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal") -> str:
+ # sourcery skip: merge-comparisons, merge-duplicate-blocks, switch
"""将时间戳转换为人类可读的时间格式
Args:
@@ -616,7 +579,7 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
"""
if mode == "normal":
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
- if mode == "normal_no_YMD":
+ elif mode == "normal_no_YMD":
return time.strftime("%H:%M:%S", time.localtime(timestamp))
elif mode == "relative":
now = time.time()
@@ -635,5 +598,172 @@ def translate_timestamp_to_human_readable(timestamp: float, mode: str = "normal"
else:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp)) + ":"
else: # mode = "lite" or unknown
- # 只返回时分秒格式,喵~
+ # 只返回时分秒格式
return time.strftime("%H:%M:%S", time.localtime(timestamp))
+
+
+def get_chat_type_and_target_info(chat_id: str) -> Tuple[bool, Optional[Dict]]:
+ """
+ 获取聊天类型(是否群聊)和私聊对象信息。
+
+ Args:
+ chat_id: 聊天流ID
+
+ Returns:
+ Tuple[bool, Optional[Dict]]:
+ - bool: 是否为群聊 (True 是群聊, False 是私聊或未知)
+ - Optional[Dict]: 如果是私聊,包含对方信息的字典;否则为 None。
+ 字典包含: platform, user_id, user_nickname, person_id, person_name
+ """
+ is_group_chat = False # Default to private/unknown
+ chat_target_info = None
+
+ try:
+ if chat_stream := get_chat_manager().get_stream(chat_id):
+ if chat_stream.group_info:
+ is_group_chat = True
+ chat_target_info = None # Explicitly None for group chat
+ elif chat_stream.user_info: # It's a private chat
+ is_group_chat = False
+ user_info = chat_stream.user_info
+ platform: str = chat_stream.platform
+ user_id: str = user_info.user_id # type: ignore
+
+ # Initialize target_info with basic info
+ target_info = {
+ "platform": platform,
+ "user_id": user_id,
+ "user_nickname": user_info.user_nickname,
+ "person_id": None,
+ "person_name": None,
+ }
+
+ # Try to fetch person info
+ try:
+ # Assume get_person_id is sync (as per original code), keep using to_thread
+ person_id = PersonInfoManager.get_person_id(platform, user_id)
+ person_name = None
+ if person_id:
+ # get_value is async, so await it directly
+ person_info_manager = get_person_info_manager()
+ person_name = person_info_manager.get_value_sync(person_id, "person_name")
+
+ target_info["person_id"] = person_id
+ target_info["person_name"] = person_name
+ except Exception as person_e:
+ logger.warning(
+ f"获取 person_id 或 person_name 时出错 for {platform}:{user_id} in utils: {person_e}"
+ )
+
+ chat_target_info = target_info
+ else:
+ logger.warning(f"无法获取 chat_stream for {chat_id} in utils")
+ except Exception as e:
+ logger.error(f"获取聊天类型和目标信息时出错 for {chat_id}: {e}", exc_info=True)
+ # Keep defaults on error
+
+ return is_group_chat, chat_target_info
+
+
+def assign_message_ids(messages: List[Any]) -> List[Dict[str, Any]]:
+ """
+ 为消息列表中的每个消息分配唯一的简短随机ID
+
+ Args:
+ messages: 消息列表
+
+ Returns:
+ 包含 {'id': str, 'message': any} 格式的字典列表
+ """
+ result = []
+ used_ids = set()
+ len_i = len(messages)
+ if len_i > 100:
+ a = 10
+ b = 99
+ else:
+ a = 1
+ b = 9
+
+ for i, message in enumerate(messages):
+ # 生成唯一的简短ID
+ while True:
+ # 使用索引+随机数生成简短ID
+ random_suffix = random.randint(a, b)
+ message_id = f"m{i+1}{random_suffix}"
+
+ if message_id not in used_ids:
+ used_ids.add(message_id)
+ break
+
+ result.append({
+ 'id': message_id,
+ 'message': message
+ })
+
+ return result
+
+
+def assign_message_ids_flexible(
+ messages: list,
+ prefix: str = "msg",
+ id_length: int = 6,
+ use_timestamp: bool = False
+) -> list:
+ """
+ 为消息列表中的每个消息分配唯一的简短随机ID(增强版)
+
+ Args:
+ messages: 消息列表
+ prefix: ID前缀,默认为"msg"
+ id_length: ID的总长度(不包括前缀),默认为6
+ use_timestamp: 是否在ID中包含时间戳,默认为False
+
+ Returns:
+ 包含 {'id': str, 'message': any} 格式的字典列表
+ """
+ result = []
+ used_ids = set()
+
+ for i, message in enumerate(messages):
+ # 生成唯一的ID
+ while True:
+ if use_timestamp:
+ # 使用时间戳的后几位 + 随机字符
+ timestamp_suffix = str(int(time.time() * 1000))[-3:]
+ remaining_length = id_length - 3
+ random_chars = ''.join(random.choices(string.ascii_lowercase + string.digits, k=remaining_length))
+ message_id = f"{prefix}{timestamp_suffix}{random_chars}"
+ else:
+ # 使用索引 + 随机字符
+ index_str = str(i + 1)
+ remaining_length = max(1, id_length - len(index_str))
+ random_chars = ''.join(random.choices(string.ascii_lowercase + string.digits, k=remaining_length))
+ message_id = f"{prefix}{index_str}{random_chars}"
+
+ if message_id not in used_ids:
+ used_ids.add(message_id)
+ break
+
+ result.append({
+ 'id': message_id,
+ 'message': message
+ })
+
+ return result
+
+
+# 使用示例:
+# messages = ["Hello", "World", "Test message"]
+#
+# # 基础版本
+# result1 = assign_message_ids(messages)
+# # 结果: [{'id': 'm1123', 'message': 'Hello'}, {'id': 'm2456', 'message': 'World'}, {'id': 'm3789', 'message': 'Test message'}]
+#
+# # 增强版本 - 自定义前缀和长度
+# result2 = assign_message_ids_flexible(messages, prefix="chat", id_length=8)
+# # 结果: [{'id': 'chat1abc2', 'message': 'Hello'}, {'id': 'chat2def3', 'message': 'World'}, {'id': 'chat3ghi4', 'message': 'Test message'}]
+#
+# # 增强版本 - 使用时间戳
+# result3 = assign_message_ids_flexible(messages, prefix="ts", use_timestamp=True)
+# # 结果: [{'id': 'ts123a1b', 'message': 'Hello'}, {'id': 'ts123c2d', 'message': 'World'}, {'id': 'ts123e3f', 'message': 'Test message'}]
diff --git a/src/chat/utils/utils_image.py b/src/chat/utils/utils_image.py
index 25b753ba..858d95aa 100644
--- a/src/chat/utils/utils_image.py
+++ b/src/chat/utils/utils_image.py
@@ -3,21 +3,20 @@ import os
import time
import hashlib
import uuid
+import io
+import asyncio
+import numpy as np
+
from typing import Optional, Tuple
from PIL import Image
-import io
-import numpy as np
-import asyncio
-
+from rich.traceback import install
+from src.common.logger import get_logger
from src.common.database.database import db
from src.common.database.database_model import Images, ImageDescriptions
from src.config.config import global_config
from src.llm_models.utils_model import LLMRequest
-from src.common.logger import get_logger
-from rich.traceback import install
-
install(extra_lines=3)
logger = get_logger("chat_image")
@@ -95,7 +94,7 @@ class ImageManager:
logger.error(f"保存描述到数据库失败 (Peewee): {str(e)}")
async def get_emoji_description(self, image_base64: str) -> str:
- """获取表情包描述,带查重和保存功能"""
+ """获取表情包描述,使用二步走识别并带缓存优化"""
try:
# 计算图片哈希
# 确保base64字符串只包含ASCII字符
@@ -103,38 +102,71 @@ class ImageManager:
image_base64 = image_base64.encode("ascii", errors="ignore").decode("ascii")
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
- image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
+ image_format = Image.open(io.BytesIO(image_bytes)).format.lower() # type: ignore
# 查询缓存的描述
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
- return f"[表情包,含义看起来是:{cached_description}]"
+ return f"[表情包:{cached_description}]"
- # 调用AI获取描述
- if image_format == "gif" or image_format == "GIF":
+ # === 二步走识别流程 ===
+
+ # 第一步:VLM视觉分析 - 生成详细描述
+ if image_format in ["gif", "GIF"]:
image_base64_processed = self.transform_gif(image_base64)
if image_base64_processed is None:
logger.warning("GIF转换失败,无法获取描述")
return "[表情包(GIF处理失败)]"
- prompt = "这是一个动态图表情包,每一张图代表了动态图的某一帧,黑色背景代表透明,使用1-2个词描述一下表情包表达的情感和内容,简短一些,输出一段平文本,不超过15个字"
- description, _ = await self._llm.generate_response_for_image(prompt, image_base64_processed, "jpg")
+ vlm_prompt = "这是一个动态图表情包,每一张图代表了动态图的某一帧,黑色背景代表透明,描述一下表情包表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
+ detailed_description, _ = await self._llm.generate_response_for_image(vlm_prompt, image_base64_processed, "jpg")
else:
- prompt = "图片是一个表情包,请用使用1-2个词描述一下表情包所表达的情感和内容,简短一些,输出一段平文本,不超过15个字"
- description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
+ vlm_prompt = "这是一个表情包,请详细描述一下表情包所表达的情感和内容,描述细节,从互联网梗,meme的角度去分析"
+ detailed_description, _ = await self._llm.generate_response_for_image(vlm_prompt, image_base64, image_format)
- if description is None:
- logger.warning("AI未能生成表情包描述")
- return "[表情包(描述生成失败)]"
+ if detailed_description is None:
+ logger.warning("VLM未能生成表情包详细描述")
+ return "[表情包(VLM描述生成失败)]"
+
+ # 第二步:LLM情感分析 - 基于详细描述生成简短的情感标签
+ emotion_prompt = f"""
+ 请你基于这个表情包的详细描述,提取出最核心的情感含义,用1-2个词概括。
+ 详细描述:'{detailed_description}'
+
+ 要求:
+ 1. 只输出1-2个最核心的情感词汇
+ 2. 从互联网梗、meme的角度理解
+ 3. 输出简短精准,不要解释
+ 4. 如果有多个词用逗号分隔
+ """
+
+ # 使用较低温度确保输出稳定
+ emotion_llm = LLMRequest(model=global_config.model.utils, temperature=0.3, max_tokens=50, request_type="emoji")
+ emotion_result, _ = await emotion_llm.generate_response_async(emotion_prompt)
+
+ if emotion_result is None:
+ logger.warning("LLM未能生成情感标签,使用详细描述的前几个词")
+ # 降级处理:从详细描述中提取关键词
+ import jieba
+ words = list(jieba.cut(detailed_description))
+ emotion_result = ",".join(words[:2]) if len(words) >= 2 else (words[0] if words else "表情")
+
+ # 处理情感结果,取前1-2个最重要的标签
+ emotions = [e.strip() for e in emotion_result.replace(",", ",").split(",") if e.strip()]
+ final_emotion = emotions[0] if emotions else "表情"
+
+ # 如果有第二个情感且不重复,也包含进来
+ if len(emotions) > 1 and emotions[1] != emotions[0]:
+ final_emotion = f"{emotions[0]},{emotions[1]}"
+
+ logger.info(f"[二步走识别] 详细描述: {detailed_description[:50]}... -> 情感标签: {final_emotion}")
# 再次检查缓存,防止并发写入时重复生成
cached_description = self._get_description_from_db(image_hash, "emoji")
if cached_description:
logger.warning(f"虽然生成了描述,但是找到缓存表情包描述: {cached_description}")
- return f"[表情包,含义看起来是:{cached_description}]"
+ return f"[表情包:{cached_description}]"
- # 根据配置决定是否保存图片
- # if global_config.emoji.save_emoji:
- # 生成文件名和路径
+ # 保存表情包文件和元数据(用于可能的后续分析)
logger.debug(f"保存表情包: {image_hash}")
current_timestamp = time.time()
filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
@@ -147,29 +179,29 @@ class ImageManager:
with open(file_path, "wb") as f:
f.write(image_bytes)
- # 保存到数据库 (Images表)
+ # 保存到数据库 (Images表) - 包含详细描述用于可能的注册流程
try:
img_obj = Images.get((Images.emoji_hash == image_hash) & (Images.type == "emoji"))
img_obj.path = file_path
- img_obj.description = description
+ img_obj.description = detailed_description # 保存详细描述
img_obj.timestamp = current_timestamp
img_obj.save()
- except Images.DoesNotExist:
+ except Images.DoesNotExist: # type: ignore
Images.create(
emoji_hash=image_hash,
path=file_path,
type="emoji",
- description=description,
+ description=detailed_description, # 保存详细描述
timestamp=current_timestamp,
)
- # logger.debug(f"保存表情包元数据: {file_path}")
except Exception as e:
logger.error(f"保存表情包文件或元数据失败: {str(e)}")
- # 保存描述到数据库 (ImageDescriptions表)
- self._save_description_to_db(image_hash, description, "emoji")
+ # 保存最终的情感标签到缓存 (ImageDescriptions表)
+ self._save_description_to_db(image_hash, final_emotion, "emoji")
- return f"[表情包:{description}]"
+ return f"[表情包:{final_emotion}]"
+
except Exception as e:
logger.error(f"获取表情包描述失败: {str(e)}")
return "[表情包]"
@@ -178,12 +210,24 @@ class ImageManager:
"""获取普通图片描述,带查重和保存功能"""
try:
# 计算图片哈希
- # 确保base64字符串只包含ASCII字符
if isinstance(image_base64, str):
image_base64 = image_base64.encode("ascii", errors="ignore").decode("ascii")
image_bytes = base64.b64decode(image_base64)
image_hash = hashlib.md5(image_bytes).hexdigest()
- image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
+
+ # 检查图片是否已存在
+ existing_image = Images.get_or_none(Images.emoji_hash == image_hash)
+ if existing_image:
+ # 更新计数
+ if hasattr(existing_image, "count") and existing_image.count is not None:
+ existing_image.count += 1
+ else:
+ existing_image.count = 1
+ existing_image.save()
+
+ # 如果已有描述,直接返回
+ if existing_image.description:
+ return f"[图片:{existing_image.description}]"
# 查询缓存的描述
cached_description = self._get_description_from_db(image_hash, "image")
@@ -192,24 +236,15 @@ class ImageManager:
return f"[图片:{cached_description}]"
# 调用AI获取描述
- prompt = "请用中文描述这张图片的内容。如果有文字,请把文字都描述出来,请留意其主题,直观感受,输出为一段平文本,最多50字"
+ image_format = Image.open(io.BytesIO(image_bytes)).format.lower() # type: ignore
+ prompt = global_config.custom_prompt.image_prompt
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
if description is None:
logger.warning("AI未能生成图片描述")
return "[图片(描述生成失败)]"
- # 再次检查缓存
- cached_description = self._get_description_from_db(image_hash, "image")
- if cached_description:
- logger.warning(f"虽然生成了描述,但是找到缓存图片描述 {cached_description}")
- return f"[图片:{cached_description}]"
-
- logger.debug(f"描述是{description}")
-
- # 根据配置决定是否保存图片
-
- # 生成文件名和路径
+ # 保存图片和描述
current_timestamp = time.time()
filename = f"{int(current_timestamp)}_{image_hash[:8]}.{image_format}"
image_dir = os.path.join(self.IMAGE_DIR, "image")
@@ -221,26 +256,31 @@ class ImageManager:
with open(file_path, "wb") as f:
f.write(image_bytes)
- # 保存到数据库 (Images表)
- try:
- img_obj = Images.get((Images.emoji_hash == image_hash) & (Images.type == "image"))
- img_obj.path = file_path
- img_obj.description = description
- img_obj.timestamp = current_timestamp
- img_obj.save()
- except Images.DoesNotExist:
+ # 保存到数据库,补充缺失字段
+ if existing_image:
+ existing_image.path = file_path
+ existing_image.description = description
+ existing_image.timestamp = current_timestamp
+ if not hasattr(existing_image, "image_id") or not existing_image.image_id:
+ existing_image.image_id = str(uuid.uuid4())
+ if not hasattr(existing_image, "vlm_processed") or existing_image.vlm_processed is None:
+ existing_image.vlm_processed = True
+ existing_image.save()
+ else:
Images.create(
+ image_id=str(uuid.uuid4()),
emoji_hash=image_hash,
path=file_path,
type="image",
description=description,
timestamp=current_timestamp,
+ vlm_processed=True,
+ count=1,
)
- logger.debug(f"保存图片元数据: {file_path}")
except Exception as e:
logger.error(f"保存图片文件或元数据失败: {str(e)}")
- # 保存描述到数据库 (ImageDescriptions表)
+ # 保存描述到ImageDescriptions表
self._save_description_to_db(image_hash, description, "image")
return f"[图片:{description}]"
@@ -250,6 +290,7 @@ class ImageManager:
@staticmethod
def transform_gif(gif_base64: str, similarity_threshold: float = 1000.0, max_frames: int = 15) -> Optional[str]:
+ # sourcery skip: use-contextlib-suppress
"""将GIF转换为水平拼接的静态图像, 跳过相似的帧
Args:
@@ -343,7 +384,7 @@ class ImageManager:
# 创建拼接图像
total_width = target_width * len(resized_frames)
# 防止总宽度为0
- if total_width == 0 and len(resized_frames) > 0:
+ if total_width == 0 and resized_frames:
logger.warning("计算出的总宽度为0,但有选中帧,可能目标宽度太小")
# 至少给点宽度吧
total_width = len(resized_frames)
@@ -360,10 +401,7 @@ class ImageManager:
# 转换为base64
buffer = io.BytesIO()
combined_image.save(buffer, format="JPEG", quality=85) # 保存为JPEG
- result_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
-
- return result_base64
-
+ return base64.b64encode(buffer.getvalue()).decode("utf-8")
except MemoryError:
logger.error("GIF转换失败: 内存不足,可能是GIF太大或帧数太多")
return None # 内存不够啦
@@ -372,6 +410,7 @@ class ImageManager:
return None # 其他错误也返回None
async def process_image(self, image_base64: str) -> Tuple[str, str]:
+ # sourcery skip: hoist-if-from-if
"""处理图片并返回图片ID和描述
Args:
@@ -410,17 +449,9 @@ class ImageManager:
if existing_image.vlm_processed is None:
existing_image.vlm_processed = False
- existing_image.count += 1
- existing_image.save()
- return existing_image.image_id, f"[picid:{existing_image.image_id}]"
- else:
- # print(f"图片已存在: {existing_image.image_id}")
- # print(f"图片描述: {existing_image.description}")
- # print(f"图片计数: {existing_image.count}")
- # 更新计数
- existing_image.count += 1
- existing_image.save()
- return existing_image.image_id, f"[picid:{existing_image.image_id}]"
+ existing_image.count += 1
+ existing_image.save()
+ return existing_image.image_id, f"[picid:{existing_image.image_id}]"
else:
# print(f"图片不存在: {image_hash}")
image_id = str(uuid.uuid4())
@@ -483,10 +514,10 @@ class ImageManager:
return
# 获取图片格式
- image_format = Image.open(io.BytesIO(image_bytes)).format.lower()
+ image_format = Image.open(io.BytesIO(image_bytes)).format.lower() # type: ignore
# 构建prompt
- prompt = """请用中文描述这张图片的内容。如果有文字,请把文字描述概括出来,请留意其主题,直观感受,输出为一段平文本,最多30字,请注意不要分点,就输出一段文本"""
+ prompt = global_config.custom_prompt.image_prompt
# 获取VLM描述
description, _ = await self._llm.generate_response_for_image(prompt, image_base64, image_format)
diff --git a/src/chat/utils/utils_voice.py b/src/chat/utils/utils_voice.py
new file mode 100644
index 00000000..cf71dc56
--- /dev/null
+++ b/src/chat/utils/utils_voice.py
@@ -0,0 +1,35 @@
+import base64
+
+from src.config.config import global_config
+from src.llm_models.utils_model import LLMRequest
+
+from src.common.logger import get_logger
+from rich.traceback import install
+install(extra_lines=3)
+
+logger = get_logger("chat_voice")
+
+async def get_voice_text(voice_base64: str) -> str:
+ """获取音频文件描述"""
+ if not global_config.voice.enable_asr:
+ logger.warning("语音识别未启用,无法处理语音消息")
+ return "[语音]"
+ try:
+ # 解码base64音频数据
+ # 确保base64字符串只包含ASCII字符
+ if isinstance(voice_base64, str):
+ voice_base64 = voice_base64.encode("ascii", errors="ignore").decode("ascii")
+ voice_bytes = base64.b64decode(voice_base64)
+ _llm = LLMRequest(model=global_config.model.voice, request_type="voice")
+ text = await _llm.generate_response_for_voice(voice_bytes)
+ if text is None:
+ logger.warning("未能生成语音文本")
+ return "[语音(文本生成失败)]"
+
+ logger.debug(f"描述是{text}")
+
+ return f"[语音:{text}]"
+ except Exception as e:
+ logger.error(f"语音转文字失败: {str(e)}")
+ return "[语音]"
+
diff --git a/src/chat/normal_chat/willing/mode_classical.py b/src/chat/willing/mode_classical.py
similarity index 79%
rename from src/chat/normal_chat/willing/mode_classical.py
rename to src/chat/willing/mode_classical.py
index 0b296bbf..e1527233 100644
--- a/src/chat/normal_chat/willing/mode_classical.py
+++ b/src/chat/willing/mode_classical.py
@@ -21,22 +21,29 @@ class ClassicalWillingManager(BaseWillingManager):
self._decay_task = asyncio.create_task(self._decay_reply_willing())
async def get_reply_probability(self, message_id):
+ # sourcery skip: inline-immediately-returned-variable
willing_info = self.ongoing_messages[message_id]
chat_id = willing_info.chat_id
current_willing = self.chat_reply_willing.get(chat_id, 0)
+
+ # print(f"[{chat_id}] 回复意愿: {current_willing}")
interested_rate = willing_info.interested_rate * global_config.normal_chat.response_interested_rate_amplifier
+
+ # print(f"[{chat_id}] 兴趣值: {interested_rate}")
- if interested_rate > 0.4:
- current_willing += interested_rate - 0.3
+ if interested_rate > 0.2:
+ current_willing += interested_rate - 0.2
- if willing_info.is_mentioned_bot:
+ if willing_info.is_mentioned_bot and global_config.normal_chat.mentioned_bot_inevitable_reply and current_willing < 2:
current_willing += 1 if current_willing < 1.0 else 0.05
- self.chat_reply_willing[chat_id] = min(current_willing, 3.0)
-
+ self.chat_reply_willing[chat_id] = min(current_willing, 1.0)
+
reply_probability = min(max((current_willing - 0.5), 0.01) * 2, 1)
-
+
+ # print(f"[{chat_id}] 回复概率: {reply_probability}")
+
return reply_probability
async def before_generate_reply_handle(self, message_id):
diff --git a/src/chat/willing/mode_custom.py b/src/chat/willing/mode_custom.py
new file mode 100644
index 00000000..9987ba94
--- /dev/null
+++ b/src/chat/willing/mode_custom.py
@@ -0,0 +1,23 @@
+from .willing_manager import BaseWillingManager
+
+NOT_IMPLEMENTED_MESSAGE = "\ncustom模式你实现了吗?没自行实现不要选custom。给你退了快点给你麦爹配置\n注:以上内容由gemini生成,如有不满请投诉gemini"
+
+class CustomWillingManager(BaseWillingManager):
+ async def async_task_starter(self) -> None:
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
+
+ async def before_generate_reply_handle(self, message_id: str):
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
+
+ async def after_generate_reply_handle(self, message_id: str):
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
+
+ async def not_reply_handle(self, message_id: str):
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
+
+ async def get_reply_probability(self, message_id: str):
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
+
+ def __init__(self):
+ super().__init__()
+ raise NotImplementedError(NOT_IMPLEMENTED_MESSAGE)
diff --git a/src/chat/normal_chat/willing/mode_mxp.py b/src/chat/willing/mode_mxp.py
similarity index 81%
rename from src/chat/normal_chat/willing/mode_mxp.py
rename to src/chat/willing/mode_mxp.py
index 7b9e5556..5a13a628 100644
--- a/src/chat/normal_chat/willing/mode_mxp.py
+++ b/src/chat/willing/mode_mxp.py
@@ -25,6 +25,8 @@ import asyncio
import time
import math
+from src.chat.message_receive.chat_stream import ChatStream
+
class MxpWillingManager(BaseWillingManager):
"""Mxp意愿管理器"""
@@ -76,7 +78,7 @@ class MxpWillingManager(BaseWillingManager):
self.chat_bot_message_time[w_info.chat_id].append(current_time)
if len(self.chat_bot_message_time[w_info.chat_id]) == int(self.fatigue_messages_triggered_num):
time_interval = 60 - (current_time - self.chat_bot_message_time[w_info.chat_id].pop(0))
- self.chat_fatigue_punishment_list[w_info.chat_id].append([current_time, time_interval * 2])
+ self.chat_fatigue_punishment_list[w_info.chat_id].append((current_time, time_interval * 2))
async def after_generate_reply_handle(self, message_id: str):
"""回复后处理"""
@@ -87,12 +89,14 @@ class MxpWillingManager(BaseWillingManager):
# rel_level = self._get_relationship_level_num(rel_value)
# self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += rel_level * 0.05
- now_chat_new_person = self.last_response_person.get(w_info.chat_id, [w_info.person_id, 0])
+ now_chat_new_person = self.last_response_person.get(w_info.chat_id, (w_info.person_id, 0))
if now_chat_new_person[0] == w_info.person_id:
if now_chat_new_person[1] < 3:
- now_chat_new_person[1] += 1
+ tmp_list = list(now_chat_new_person)
+ tmp_list[1] += 1 # type: ignore
+ self.last_response_person[w_info.chat_id] = tuple(tmp_list) # type: ignore
else:
- self.last_response_person[w_info.chat_id] = [w_info.person_id, 0]
+ self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
async def not_reply_handle(self, message_id: str):
"""不回复处理"""
@@ -108,11 +112,12 @@ class MxpWillingManager(BaseWillingManager):
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] += self.single_chat_gain * (
2 * self.last_response_person[w_info.chat_id][1] - 1
)
- now_chat_new_person = self.last_response_person.get(w_info.chat_id, ["", 0])
+ now_chat_new_person = self.last_response_person.get(w_info.chat_id, ("", 0))
if now_chat_new_person[0] != w_info.person_id:
- self.last_response_person[w_info.chat_id] = [w_info.person_id, 0]
+ self.last_response_person[w_info.chat_id] = (w_info.person_id, 0)
async def get_reply_probability(self, message_id: str):
+ # sourcery skip: merge-duplicate-blocks, remove-redundant-if
"""获取回复概率"""
async with self.lock:
w_info = self.ongoing_messages[message_id]
@@ -121,17 +126,16 @@ class MxpWillingManager(BaseWillingManager):
self.logger.debug(f"基础意愿值:{current_willing}")
if w_info.is_mentioned_bot:
- current_willing_ = self.mention_willing_gain / (int(current_willing) + 1)
- current_willing += current_willing_
+ willing_gain = self.mention_willing_gain / (int(current_willing) + 1)
+ current_willing += willing_gain
if self.is_debug:
- self.logger.debug(f"提及增益:{current_willing_}")
+ self.logger.debug(f"提及增益:{willing_gain}")
if w_info.interested_rate > 0:
- current_willing += math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
+ willing_gain = math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain
+ current_willing += willing_gain
if self.is_debug:
- self.logger.debug(
- f"兴趣增益:{math.atan(w_info.interested_rate / 2) / math.pi * 2 * self.interest_willing_gain}"
- )
+ self.logger.debug(f"兴趣增益:{willing_gain}")
self.chat_person_reply_willing[w_info.chat_id][w_info.person_id] = current_willing
@@ -152,8 +156,8 @@ class MxpWillingManager(BaseWillingManager):
self.logger.debug(f"疲劳衰减:{self.chat_fatigue_willing_attenuation.get(w_info.chat_id, 0)}")
chat_ongoing_messages = [msg for msg in self.ongoing_messages.values() if msg.chat_id == w_info.chat_id]
- chat_person_ogoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
- if len(chat_person_ogoing_messages) >= 2:
+ chat_person_ongoing_messages = [msg for msg in chat_ongoing_messages if msg.person_id == w_info.person_id]
+ if len(chat_person_ongoing_messages) >= 2:
current_willing = 0
if self.is_debug:
self.logger.debug("进行中消息惩罚:归0")
@@ -191,34 +195,33 @@ class MxpWillingManager(BaseWillingManager):
basic_willing + (willing - basic_willing) * self.intention_decay_rate
)
- def setup(self, message, chat, is_mentioned_bot, interested_rate):
- super().setup(message, chat, is_mentioned_bot, interested_rate)
-
- self.chat_reply_willing[chat.stream_id] = self.chat_reply_willing.get(
- chat.stream_id, self.basic_maximum_willing
- )
- self.chat_person_reply_willing[chat.stream_id] = self.chat_person_reply_willing.get(chat.stream_id, {})
- self.chat_person_reply_willing[chat.stream_id][
- self.ongoing_messages[message.message_info.message_id].person_id
- ] = self.chat_person_reply_willing[chat.stream_id].get(
- self.ongoing_messages[message.message_info.message_id].person_id, self.chat_reply_willing[chat.stream_id]
+ def setup(self, message: dict, chat_stream: ChatStream):
+ super().setup(message, chat_stream)
+ stream_id = chat_stream.stream_id
+ self.chat_reply_willing[stream_id] = self.chat_reply_willing.get(stream_id, self.basic_maximum_willing)
+ self.chat_person_reply_willing[stream_id] = self.chat_person_reply_willing.get(stream_id, {})
+ self.chat_person_reply_willing[stream_id][self.ongoing_messages[message.get("message_id", "")].person_id] = (
+ self.chat_person_reply_willing[stream_id].get(
+ self.ongoing_messages[message.get("message_id", "")].person_id,
+ self.chat_reply_willing[stream_id],
+ )
)
current_time = time.time()
- if chat.stream_id not in self.chat_new_message_time:
- self.chat_new_message_time[chat.stream_id] = []
- self.chat_new_message_time[chat.stream_id].append(current_time)
- if len(self.chat_new_message_time[chat.stream_id]) > self.number_of_message_storage:
- self.chat_new_message_time[chat.stream_id].pop(0)
+ if stream_id not in self.chat_new_message_time:
+ self.chat_new_message_time[stream_id] = []
+ self.chat_new_message_time[stream_id].append(current_time)
+ if len(self.chat_new_message_time[stream_id]) > self.number_of_message_storage:
+ self.chat_new_message_time[stream_id].pop(0)
- if chat.stream_id not in self.chat_fatigue_punishment_list:
- self.chat_fatigue_punishment_list[chat.stream_id] = [
+ if stream_id not in self.chat_fatigue_punishment_list:
+ self.chat_fatigue_punishment_list[stream_id] = [
(
current_time,
self.number_of_message_storage * self.basic_maximum_willing / self.expected_replies_per_min * 60,
)
]
- self.chat_fatigue_willing_attenuation[chat.stream_id] = (
+ self.chat_fatigue_willing_attenuation[stream_id] = (
-2 * self.basic_maximum_willing * self.fatigue_coefficient
)
@@ -227,12 +230,11 @@ class MxpWillingManager(BaseWillingManager):
"""意愿值转化为概率"""
willing = max(0, willing)
if willing < 2:
- probability = math.atan(willing * 2) / math.pi * 2
+ return math.atan(willing * 2) / math.pi * 2
elif willing < 2.5:
- probability = math.atan(willing * 4) / math.pi * 2
+ return math.atan(willing * 4) / math.pi * 2
else:
- probability = 1
- return probability
+ return 1
async def _chat_new_message_to_change_basic_willing(self):
"""聊天流新消息改变基础意愿"""
@@ -259,7 +261,7 @@ class MxpWillingManager(BaseWillingManager):
update_time = 20
elif len(message_times) == self.number_of_message_storage:
time_interval = current_time - message_times[0]
- basic_willing = self._basic_willing_culculate(time_interval)
+ basic_willing = self._basic_willing_calculate(time_interval)
self.chat_reply_willing[chat_id] = basic_willing
update_time = 17 * basic_willing / self.basic_maximum_willing + 3
else:
@@ -268,7 +270,7 @@ class MxpWillingManager(BaseWillingManager):
if self.is_debug:
self.logger.debug(f"聊天流意愿值更新:{self.chat_reply_willing}")
- def _basic_willing_culculate(self, t: float) -> float:
+ def _basic_willing_calculate(self, t: float) -> float:
"""基础意愿值计算"""
return math.tan(t * self.expected_replies_per_min * math.pi / 120 / self.number_of_message_storage) / 2
diff --git a/src/chat/normal_chat/willing/willing_manager.py b/src/chat/willing/willing_manager.py
similarity index 92%
rename from src/chat/normal_chat/willing/willing_manager.py
rename to src/chat/willing/willing_manager.py
index 0fa701f9..6b946f92 100644
--- a/src/chat/normal_chat/willing/willing_manager.py
+++ b/src/chat/willing/willing_manager.py
@@ -1,14 +1,15 @@
-from src.common.logger import get_logger
+import importlib
+import asyncio
+
+from abc import ABC, abstractmethod
+from typing import Dict, Optional, Any
+from rich.traceback import install
from dataclasses import dataclass
+
+from src.common.logger import get_logger
from src.config.config import global_config
from src.chat.message_receive.chat_stream import ChatStream, GroupInfo
-from src.chat.message_receive.message import MessageRecv
from src.person_info.person_info import PersonInfoManager, get_person_info_manager
-from abc import ABC, abstractmethod
-import importlib
-from typing import Dict, Optional
-import asyncio
-from rich.traceback import install
install(extra_lines=3)
@@ -52,7 +53,7 @@ class WillingInfo:
interested_rate (float): 兴趣度
"""
- message: MessageRecv
+ message: Dict[str, Any] # 原始消息数据
chat: ChatStream
person_info_manager: PersonInfoManager
chat_id: str
@@ -91,19 +92,19 @@ class BaseWillingManager(ABC):
self.lock = asyncio.Lock()
self.logger = logger
- def setup(self, message: MessageRecv, chat: ChatStream, is_mentioned_bot: bool, interested_rate: float):
- person_id = PersonInfoManager.get_person_id(chat.platform, chat.user_info.user_id)
- self.ongoing_messages[message.message_info.message_id] = WillingInfo(
+ def setup(self, message: dict, chat: ChatStream):
+ person_id = PersonInfoManager.get_person_id(chat.platform, chat.user_info.user_id) # type: ignore
+ self.ongoing_messages[message.get("message_id", "")] = WillingInfo(
message=message,
chat=chat,
person_info_manager=get_person_info_manager(),
chat_id=chat.stream_id,
person_id=person_id,
group_info=chat.group_info,
- is_mentioned_bot=is_mentioned_bot,
- is_emoji=message.is_emoji,
- is_picid=message.is_picid,
- interested_rate=interested_rate,
+ is_mentioned_bot=message.get("is_mentioned", False),
+ is_emoji=message.get("is_emoji", False),
+ is_picid=message.get("is_picid", False),
+ interested_rate = message.get("interest_value") or 0.0,
)
def delete(self, message_id: str):
diff --git a/src/common/database/database.py b/src/common/database/database.py
index 24966415..ca361481 100644
--- a/src/common/database/database.py
+++ b/src/common/database/database.py
@@ -54,11 +54,11 @@ class DBWrapper:
return getattr(get_db(), name)
def __getitem__(self, key):
- return get_db()[key]
+ return get_db()[key] # type: ignore
# 全局数据库访问点
-memory_db: Database = DBWrapper()
+memory_db: Database = DBWrapper() # type: ignore
# 定义数据库文件路径
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", ".."))
diff --git a/src/common/database/database_model.py b/src/common/database/database_model.py
index 500852d0..1d0b8a39 100644
--- a/src/common/database/database_model.py
+++ b/src/common/database/database_model.py
@@ -129,6 +129,9 @@ class Messages(BaseModel):
reply_to = TextField(null=True)
+ interest_value = DoubleField(null=True)
+ is_mentioned = BooleanField(null=True)
+
# 从 chat_info 扁平化而来的字段
chat_info_stream_id = TextField()
chat_info_platform = TextField()
@@ -150,9 +153,16 @@ class Messages(BaseModel):
processed_plain_text = TextField(null=True) # 处理后的纯文本消息
display_message = TextField(null=True) # 显示的消息
- detailed_plain_text = TextField(null=True) # 详细的纯文本消息
memorized_times = IntegerField(default=0) # 被记忆的次数
+ priority_mode = TextField(null=True)
+ priority_info = TextField(null=True)
+
+ additional_config = TextField(null=True)
+ is_emoji = BooleanField(default=False)
+ is_picid = BooleanField(default=False)
+ is_command = BooleanField(default=False)
+
class Meta:
# database = db # 继承自 BaseModel
table_name = "messages"
@@ -252,14 +262,25 @@ class PersonInfo(BaseModel):
know_times = FloatField(null=True) # 认识时间 (时间戳)
know_since = FloatField(null=True) # 首次印象总结时间
last_know = FloatField(null=True) # 最后一次印象总结时间
- familiarity_value = IntegerField(null=True, default=0) # 熟悉度,0-100,从完全陌生到非常熟悉
- liking_value = IntegerField(null=True, default=50) # 好感度,0-100,从非常厌恶到十分喜欢
+ attitude = IntegerField(null=True, default=50) # 态度,0-100,从非常厌恶到十分喜欢
class Meta:
# database = db # 继承自 BaseModel
table_name = "person_info"
+class Memory(BaseModel):
+ memory_id = TextField(index=True)
+ chat_id = TextField(null=True)
+ memory_text = TextField(null=True)
+ keywords = TextField(null=True)
+ create_time = FloatField(null=True)
+ last_view_time = FloatField(null=True)
+
+ class Meta:
+ table_name = "memory"
+
+
class Knowledges(BaseModel):
"""
用于存储知识库条目的模型。
@@ -274,6 +295,23 @@ class Knowledges(BaseModel):
table_name = "knowledges"
+class Expression(BaseModel):
+ """
+ 用于存储表达风格的模型。
+ """
+
+ situation = TextField()
+ style = TextField()
+ count = FloatField()
+ last_active_time = FloatField()
+ chat_id = TextField(index=True)
+ type = TextField()
+ create_date = FloatField(null=True) # 创建日期,允许为空以兼容老数据
+
+ class Meta:
+ table_name = "expression"
+
+
class ThinkingLog(BaseModel):
chat_id = TextField(index=True)
trigger_text = TextField(null=True)
@@ -298,19 +336,6 @@ class ThinkingLog(BaseModel):
table_name = "thinking_logs"
-class RecalledMessages(BaseModel):
- """
- 用于存储撤回消息记录的模型。
- """
-
- message_id = TextField(index=True) # 被撤回的消息 ID
- time = DoubleField() # 撤回操作发生的时间戳
- stream_id = TextField() # 对应的 ChatStreams stream_id
-
- class Meta:
- table_name = "recalled_messages"
-
-
class GraphNodes(BaseModel):
"""
用于存储记忆图节点的模型
@@ -358,10 +383,11 @@ def create_tables():
OnlineTime,
PersonInfo,
Knowledges,
+ Expression,
ThinkingLog,
- RecalledMessages, # 添加新模型
GraphNodes, # 添加图节点表
GraphEdges, # 添加图边表
+ Memory,
ActionRecords, # 添加 ActionRecords 到初始化列表
]
)
@@ -383,8 +409,9 @@ def initialize_database():
OnlineTime,
PersonInfo,
Knowledges,
+ Expression,
+ Memory,
ThinkingLog,
- RecalledMessages,
GraphNodes,
GraphEdges,
ActionRecords, # 添加 ActionRecords 到初始化列表
@@ -405,9 +432,7 @@ def initialize_database():
existing_columns = {row[1] for row in cursor.fetchall()}
model_fields = set(model._meta.fields.keys())
- # 检查并添加缺失字段(原有逻辑)
- missing_fields = model_fields - existing_columns
- if missing_fields:
+ if missing_fields := model_fields - existing_columns:
logger.warning(f"表 '{table_name}' 缺失字段: {missing_fields}")
for field_name, field_obj in model._meta.fields.items():
@@ -423,14 +448,14 @@ def initialize_database():
"DateTimeField": "DATETIME",
}.get(field_type, "TEXT")
alter_sql = f"ALTER TABLE {table_name} ADD COLUMN {field_name} {sql_type}"
- if field_obj.null:
- alter_sql += " NULL"
- else:
- alter_sql += " NOT NULL"
+ alter_sql += " NULL" if field_obj.null else " NOT NULL"
if hasattr(field_obj, "default") and field_obj.default is not None:
- # 正确处理不同类型的默认值
+ # 正确处理不同类型的默认值,跳过lambda函数
default_value = field_obj.default
- if isinstance(default_value, str):
+ if callable(default_value):
+ # 跳过lambda函数或其他可调用对象,这些无法在SQL中表示
+ pass
+ elif isinstance(default_value, str):
alter_sql += f" DEFAULT '{default_value}'"
elif isinstance(default_value, bool):
alter_sql += f" DEFAULT {int(default_value)}"
diff --git a/src/common/logger.py b/src/common/logger.py
index cf6f0740..a6bfc263 100644
--- a/src/common/logger.py
+++ b/src/common/logger.py
@@ -1,16 +1,16 @@
-import logging
-
# 使用基于时间戳的文件处理器,简单的轮转份数限制
-from pathlib import Path
-from typing import Callable, Optional
+
+import logging
import json
import threading
import time
-from datetime import datetime, timedelta
-
import structlog
import toml
+from pathlib import Path
+from typing import Callable, Optional
+from datetime import datetime, timedelta
+
# 创建logs目录
LOG_DIR = Path("logs")
LOG_DIR.mkdir(exist_ok=True)
@@ -160,7 +160,7 @@ def close_handlers():
_console_handler = None
-def remove_duplicate_handlers():
+def remove_duplicate_handlers(): # sourcery skip: for-append-to-extend, list-comprehension
"""移除重复的handler,特别是文件handler"""
root_logger = logging.getLogger()
@@ -184,7 +184,7 @@ def remove_duplicate_handlers():
# 读取日志配置
-def load_log_config():
+def load_log_config(): # sourcery skip: use-contextlib-suppress
"""从配置文件加载日志设置"""
config_path = Path("config/bot_config.toml")
default_config = {
@@ -321,60 +321,49 @@ MODULE_COLORS = {
# 核心模块
"main": "\033[1;97m", # 亮白色+粗体 (主程序)
"api": "\033[92m", # 亮绿色
- "emoji": "\033[92m", # 亮绿色
- "chat": "\033[94m", # 亮蓝色
+ "emoji": "\033[38;5;214m", # 橙黄色,偏向橙色但与replyer和action_manager不同
+    "chat": "\033[92m",  # 亮绿色
"config": "\033[93m", # 亮黄色
"common": "\033[95m", # 亮紫色
"tools": "\033[96m", # 亮青色
"lpmm": "\033[96m",
"plugin_system": "\033[91m", # 亮红色
- "experimental": "\033[97m", # 亮白色
"person_info": "\033[32m", # 绿色
- "individuality": "\033[34m", # 蓝色
+ "individuality": "\033[94m", # 显眼的亮蓝色
"manager": "\033[35m", # 紫色
"llm_models": "\033[36m", # 青色
- "plugins": "\033[31m", # 红色
- "plugin_api": "\033[33m", # 黄色
- "remote": "\033[38;5;93m", # 紫蓝色
+ "remote": "\033[38;5;242m", # 深灰色,更不显眼
"planner": "\033[36m",
"memory": "\033[34m",
- "hfc": "\033[96m",
- "base_action": "\033[96m",
- "action_manager": "\033[34m",
+ "hfc": "\033[38;5;81m", # 稍微暗一些的青色,保持可读
+ "action_manager": "\033[38;5;208m", # 橙色,不与replyer重复
# 关系系统
- "relation": "\033[38;5;201m", # 深粉色
+ "relation": "\033[38;5;139m", # 柔和的紫色,不刺眼
# 聊天相关模块
"normal_chat": "\033[38;5;81m", # 亮蓝绿色
- "normal_chat_response": "\033[38;5;123m", # 青绿色
- "normal_chat_expressor": "\033[38;5;117m", # 浅蓝色
- "normal_chat_action_modifier": "\033[38;5;111m", # 蓝色
- "normal_chat_planner": "\033[38;5;75m", # 浅蓝色
- "heartflow": "\033[38;5;213m", # 粉色
- "heartflow_utils": "\033[38;5;219m", # 浅粉色
+ "heartflow": "\033[38;5;175m", # 柔和的粉色,不显眼但保持粉色系
"sub_heartflow": "\033[38;5;207m", # 粉紫色
"subheartflow_manager": "\033[38;5;201m", # 深粉色
- "observation": "\033[38;5;141m", # 紫色
"background_tasks": "\033[38;5;240m", # 灰色
"chat_message": "\033[38;5;45m", # 青色
"chat_stream": "\033[38;5;51m", # 亮青色
- "sender": "\033[38;5;39m", # 蓝色
+ "sender": "\033[38;5;67m", # 稍微暗一些的蓝色,不显眼
"message_storage": "\033[38;5;33m", # 深蓝色
+ "expressor": "\033[38;5;166m", # 橙色
# 专注聊天模块
"replyer": "\033[38;5;166m", # 橙色
- "expressor": "\033[38;5;172m", # 黄橙色
- "planner_factory": "\033[38;5;178m", # 黄色
- "processor": "\033[38;5;184m", # 黄绿色
- "base_processor": "\033[38;5;190m", # 绿黄色
- "working_memory": "\033[38;5;22m", # 深绿色
- "memory_activator": "\033[38;5;28m", # 绿色
+    "memory_activator": "\033[34m",  # 蓝色
# 插件系统
+ "plugins": "\033[31m", # 红色
+ "plugin_api": "\033[33m", # 黄色
"plugin_manager": "\033[38;5;208m", # 红色
"base_plugin": "\033[38;5;202m", # 橙红色
+ "send_api": "\033[38;5;208m", # 橙色
"base_command": "\033[38;5;208m", # 橙色
"component_registry": "\033[38;5;214m", # 橙黄色
"stream_api": "\033[38;5;220m", # 黄色
"config_api": "\033[38;5;226m", # 亮黄色
- "hearflow_api": "\033[38;5;154m", # 黄绿色
+ "heartflow_api": "\033[38;5;154m", # 黄绿色
"action_apis": "\033[38;5;118m", # 绿色
"independent_apis": "\033[38;5;82m", # 绿色
"llm_api": "\033[38;5;46m", # 亮绿色
@@ -387,36 +376,65 @@ MODULE_COLORS = {
"local_storage": "\033[38;5;141m", # 紫色
"willing": "\033[38;5;147m", # 浅紫色
# 工具模块
- "tool_use": "\033[38;5;64m", # 深绿色
- "base_tool": "\033[38;5;70m", # 绿色
- "compare_numbers_tool": "\033[38;5;76m", # 浅绿色
- "change_mood_tool": "\033[38;5;82m", # 绿色
- "relationship_tool": "\033[38;5;88m", # 深红色
+ "tool_use": "\033[38;5;172m", # 橙褐色
+ "tool_executor": "\033[38;5;172m", # 橙褐色
+ "base_tool": "\033[38;5;178m", # 金黄色
# 工具和实用模块
- "prompt": "\033[38;5;99m", # 紫色
"prompt_build": "\033[38;5;105m", # 紫色
"chat_utils": "\033[38;5;111m", # 蓝色
"chat_image": "\033[38;5;117m", # 浅蓝色
- "typo_gen": "\033[38;5;123m", # 青绿色
"maibot_statistic": "\033[38;5;129m", # 紫色
# 特殊功能插件
"mute_plugin": "\033[38;5;240m", # 灰色
- "example_comprehensive": "\033[38;5;246m", # 浅灰色
"core_actions": "\033[38;5;117m", # 深红色
"tts_action": "\033[38;5;58m", # 深黄色
"doubao_pic_plugin": "\033[38;5;64m", # 深绿色
- "vtb_action": "\033[38;5;70m", # 绿色
+ # Action组件
+ "no_reply_action": "\033[38;5;196m", # 亮红色,更显眼
+ "reply_action": "\033[38;5;46m", # 亮绿色
+ "base_action": "\033[38;5;250m", # 浅灰色
# 数据库和消息
"database_model": "\033[38;5;94m", # 橙褐色
- "maim_message": "\033[38;5;100m", # 绿褐色
- # 实验性模块
- "pfc": "\033[38;5;252m", # 浅灰色
+ "maim_message": "\033[38;5;140m", # 紫褐色
# 日志系统
"logger": "\033[38;5;8m", # 深灰色
- "demo": "\033[38;5;15m", # 白色
"confirm": "\033[1;93m", # 黄色+粗体
# 模型相关
"model_utils": "\033[38;5;164m", # 紫红色
+ "relationship_fetcher": "\033[38;5;170m", # 浅紫色
+ "relationship_builder": "\033[38;5;93m", # 浅蓝色
+
+ #s4u
+ "context_web_api": "\033[38;5;240m", # 深灰色
+    "S4U_chat": "\033[92m",  # 亮绿色
+}
+
+# 定义模块别名映射 - 将真实的logger名称映射到显示的别名
+MODULE_ALIASES = {
+ # 示例映射
+ "individuality": "人格特质",
+ "emoji": "表情包",
+ "no_reply_action": "摸鱼",
+ "reply_action": "回复",
+ "action_manager": "动作",
+ "memory_activator": "记忆",
+ "tool_use": "工具",
+ "expressor": "表达方式",
+ "database_model": "数据库",
+ "mood": "情绪",
+ "memory": "记忆",
+ "tool_executor": "工具",
+ "hfc": "聊天节奏",
+ "chat": "所见",
+ "plugin_manager": "插件",
+ "relationship_builder": "关系",
+ "llm_models": "模型",
+ "person_info": "人物",
+ "chat_stream": "聊天流",
+ "planner": "规划器",
+ "replyer": "言语",
+ "config": "配置",
+ "main": "主程序",
}
RESET_COLOR = "\033[0m"
@@ -426,6 +444,7 @@ class ModuleColoredConsoleRenderer:
"""自定义控制台渲染器,为不同模块提供不同颜色"""
def __init__(self, colors=True):
+ # sourcery skip: merge-duplicate-blocks, remove-redundant-if
self._colors = colors
self._config = LOG_CONFIG
@@ -457,6 +476,7 @@ class ModuleColoredConsoleRenderer:
self._enable_full_content_colors = False
def __call__(self, logger, method_name, event_dict):
+ # sourcery skip: merge-duplicate-blocks
"""渲染日志消息"""
# 获取基本信息
timestamp = event_dict.get("timestamp", "")
@@ -505,15 +525,18 @@ class ModuleColoredConsoleRenderer:
if self._colors and self._enable_module_colors and logger_name:
module_color = MODULE_COLORS.get(logger_name, "")
- # 模块名称(带颜色)
+ # 模块名称(带颜色和别名支持)
if logger_name:
+ # 获取别名,如果没有别名则使用原名称
+ display_name = MODULE_ALIASES.get(logger_name, logger_name)
+
if self._colors and self._enable_module_colors:
if module_color:
- module_part = f"{module_color}[{logger_name}]{RESET_COLOR}"
+ module_part = f"{module_color}[{display_name}]{RESET_COLOR}"
else:
- module_part = f"[{logger_name}]"
+ module_part = f"[{display_name}]"
else:
- module_part = f"[{logger_name}]"
+ module_part = f"[{display_name}]"
parts.append(module_part)
# 消息内容(确保转换为字符串)
@@ -676,7 +699,7 @@ def get_logger(name: Optional[str]) -> structlog.stdlib.BoundLogger:
"""获取logger实例,支持按名称绑定"""
if name is None:
return raw_logger
- logger = binds.get(name)
+ logger = binds.get(name) # type: ignore
if logger is None:
logger: structlog.stdlib.BoundLogger = structlog.get_logger(name).bind(logger_name=name)
binds[name] = logger
@@ -685,8 +708,8 @@ def get_logger(name: Optional[str]) -> structlog.stdlib.BoundLogger:
def configure_logging(
level: str = "INFO",
- console_level: str = None,
- file_level: str = None,
+ console_level: Optional[str] = None,
+ file_level: Optional[str] = None,
max_bytes: int = 5 * 1024 * 1024,
backup_count: int = 30,
log_dir: str = "logs",
@@ -723,19 +746,7 @@ def configure_logging(
root_logger.setLevel(getattr(logging, level.upper()))
-def set_module_color(module_name: str, color_code: str):
- """为指定模块设置颜色
- Args:
- module_name: 模块名称
- color_code: ANSI颜色代码,例如 '\033[92m' 表示亮绿色
- """
- MODULE_COLORS[module_name] = color_code
-
-
-def get_module_colors():
- """获取当前模块颜色配置"""
- return MODULE_COLORS.copy()
def reload_log_config():
@@ -743,14 +754,11 @@ def reload_log_config():
global LOG_CONFIG
LOG_CONFIG = load_log_config()
- # 重新设置handler的日志级别
- file_handler = get_file_handler()
- if file_handler:
+ if file_handler := get_file_handler():
file_level = LOG_CONFIG.get("file_log_level", LOG_CONFIG.get("log_level", "INFO"))
file_handler.setLevel(getattr(logging, file_level.upper(), logging.INFO))
- console_handler = get_console_handler()
- if console_handler:
+ if console_handler := get_console_handler():
console_level = LOG_CONFIG.get("console_log_level", LOG_CONFIG.get("log_level", "INFO"))
console_handler.setLevel(getattr(logging, console_level.upper(), logging.INFO))
@@ -794,8 +802,7 @@ def set_console_log_level(level: str):
global LOG_CONFIG
LOG_CONFIG["console_log_level"] = level.upper()
- console_handler = get_console_handler()
- if console_handler:
+ if console_handler := get_console_handler():
console_handler.setLevel(getattr(logging, level.upper(), logging.INFO))
# 重新设置root logger级别
@@ -814,8 +821,7 @@ def set_file_log_level(level: str):
global LOG_CONFIG
LOG_CONFIG["file_log_level"] = level.upper()
- file_handler = get_file_handler()
- if file_handler:
+ if file_handler := get_file_handler():
file_handler.setLevel(getattr(logging, level.upper(), logging.INFO))
# 重新设置root logger级别
@@ -931,9 +937,20 @@ def show_module_colors():
for module_name, _color_code in MODULE_COLORS.items():
# 临时创建一个该模块的logger来展示颜色
demo_logger = structlog.get_logger(module_name).bind(logger_name=module_name)
- demo_logger.info(f"这是 {module_name} 模块的颜色效果")
+ alias = MODULE_ALIASES.get(module_name, module_name)
+ if alias != module_name:
+ demo_logger.info(f"这是 {module_name} 模块的颜色效果 (显示为: {alias})")
+ else:
+ demo_logger.info(f"这是 {module_name} 模块的颜色效果")
print("=== 颜色展示结束 ===\n")
+
+ # 显示别名映射表
+ if MODULE_ALIASES:
+ print("=== 当前别名映射 ===")
+ for module_name, alias in MODULE_ALIASES.items():
+ print(f" {module_name} -> {alias}")
+ print("=== 别名映射结束 ===\n")
def format_json_for_logging(data, indent=2, ensure_ascii=False):
@@ -947,13 +964,12 @@ def format_json_for_logging(data, indent=2, ensure_ascii=False):
Returns:
str: 格式化后的JSON字符串
"""
- if isinstance(data, str):
- # 如果是JSON字符串,先解析再格式化
- parsed_data = json.loads(data)
- return json.dumps(parsed_data, indent=indent, ensure_ascii=ensure_ascii)
- else:
+ if not isinstance(data, str):
# 如果是对象,直接格式化
return json.dumps(data, indent=indent, ensure_ascii=ensure_ascii)
+ # 如果是JSON字符串,先解析再格式化
+ parsed_data = json.loads(data)
+ return json.dumps(parsed_data, indent=indent, ensure_ascii=ensure_ascii)
def cleanup_old_logs():
diff --git a/src/common/message/api.py b/src/common/message/api.py
index 59ba9d1e..eed85c0a 100644
--- a/src/common/message/api.py
+++ b/src/common/message/api.py
@@ -8,7 +8,7 @@ from src.config.config import global_config
global_api = None
-def get_global_api() -> MessageServer:
+def get_global_api() -> MessageServer: # sourcery skip: extract-method
"""获取全局MessageServer实例"""
global global_api
if global_api is None:
@@ -36,9 +36,8 @@ def get_global_api() -> MessageServer:
kwargs["custom_logger"] = maim_message_logger
# 添加token认证
- if maim_message_config.auth_token:
- if len(maim_message_config.auth_token) > 0:
- kwargs["enable_token"] = True
+ if maim_message_config.auth_token and len(maim_message_config.auth_token) > 0:
+ kwargs["enable_token"] = True
if maim_message_config.use_custom:
# 添加WSS模式支持
diff --git a/src/common/message_repository.py b/src/common/message_repository.py
index 107ee1c5..a847718b 100644
--- a/src/common/message_repository.py
+++ b/src/common/message_repository.py
@@ -1,8 +1,11 @@
-from src.common.database.database_model import Messages # 更改导入
-from src.common.logger import get_logger
import traceback
+
from typing import List, Any, Optional
from peewee import Model # 添加 Peewee Model 导入
+from src.config.config import global_config
+
+from src.common.database.database_model import Messages
+from src.common.logger import get_logger
logger = get_logger(__name__)
@@ -19,6 +22,8 @@ def find_messages(
sort: Optional[List[tuple[str, int]]] = None,
limit: int = 0,
limit_mode: str = "latest",
+ filter_bot=False,
+ filter_command=False,
) -> List[dict[str, Any]]:
"""
根据提供的过滤器、排序和限制条件查找消息。
@@ -68,6 +73,12 @@ def find_messages(
if conditions:
query = query.where(*conditions)
+ if filter_bot:
+ query = query.where(Messages.user_id != global_config.bot.qq_account)
+
+ if filter_command:
+        query = query.where(~Messages.is_command)
+
if limit > 0:
if limit_mode == "earliest":
# 获取时间最早的 limit 条记录,已经是正序
diff --git a/src/common/remote.py b/src/common/remote.py
index 955e760b..5380cd01 100644
--- a/src/common/remote.py
+++ b/src/common/remote.py
@@ -23,7 +23,7 @@ class TelemetryHeartBeatTask(AsyncTask):
self.server_url = TELEMETRY_SERVER_URL
"""遥测服务地址"""
- self.client_uuid = local_storage["mmc_uuid"] if "mmc_uuid" in local_storage else None
+ self.client_uuid: str | None = local_storage["mmc_uuid"] if "mmc_uuid" in local_storage else None # type: ignore
"""客户端UUID"""
self.info_dict = self._get_sys_info()
@@ -72,7 +72,7 @@ class TelemetryHeartBeatTask(AsyncTask):
timeout=aiohttp.ClientTimeout(total=5), # 设置超时时间为5秒
) as response:
logger.debug(f"{TELEMETRY_SERVER_URL}/stat/reg_client")
- logger.debug(local_storage["deploy_time"])
+ logger.debug(local_storage["deploy_time"]) # type: ignore
logger.debug(f"Response status: {response.status}")
if response.status == 200:
@@ -93,7 +93,7 @@ class TelemetryHeartBeatTask(AsyncTask):
except Exception as e:
import traceback
- error_msg = str(e) if str(e) else "未知错误"
+ error_msg = str(e) or "未知错误"
logger.warning(
f"请求UUID出错,不过你还是可以正常使用麦麦: {type(e).__name__}: {error_msg}"
) # 可能是网络问题
@@ -114,11 +114,11 @@ class TelemetryHeartBeatTask(AsyncTask):
"""向服务器发送心跳"""
headers = {
"Client-UUID": self.client_uuid,
- "User-Agent": f"HeartbeatClient/{self.client_uuid[:8]}",
+        "User-Agent": f"HeartbeatClient/{(self.client_uuid or 'unknown')[:8]}",
}
logger.debug(f"正在发送心跳到服务器: {self.server_url}")
- logger.debug(headers)
+ logger.debug(str(headers))
try:
async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
@@ -151,7 +151,7 @@ class TelemetryHeartBeatTask(AsyncTask):
except Exception as e:
import traceback
- error_msg = str(e) if str(e) else "未知错误"
+ error_msg = str(e) or "未知错误"
logger.warning(f"(此消息不会影响正常使用)状态未发生: {type(e).__name__}: {error_msg}")
logger.debug(f"完整错误信息: {traceback.format_exc()}")
diff --git a/src/config/auto_update.py b/src/config/auto_update.py
index 2088e362..8d097ec4 100644
--- a/src/config/auto_update.py
+++ b/src/config/auto_update.py
@@ -1,9 +1,54 @@
import shutil
import tomlkit
+from tomlkit.items import Table, KeyType
from pathlib import Path
from datetime import datetime
+def get_key_comment(toml_table, key):
+ # 获取key的注释(如果有)
+ if hasattr(toml_table, "trivia") and hasattr(toml_table.trivia, "comment"):
+ return toml_table.trivia.comment
+ if hasattr(toml_table, "value") and isinstance(toml_table.value, dict):
+ item = toml_table.value.get(key)
+ if item is not None and hasattr(item, "trivia"):
+ return item.trivia.comment
+ if hasattr(toml_table, "keys"):
+ for k in toml_table.keys():
+ if isinstance(k, KeyType) and k.key == key:
+ return k.trivia.comment
+ return None
+
+
+def compare_dicts(new, old, path=None, new_comments=None, old_comments=None, logs=None):
+ # 递归比较两个dict,找出新增和删减项,收集注释
+ if path is None:
+ path = []
+ if logs is None:
+ logs = []
+ if new_comments is None:
+ new_comments = {}
+ if old_comments is None:
+ old_comments = {}
+ # 新增项
+ for key in new:
+ if key == "version":
+ continue
+ if key not in old:
+ comment = get_key_comment(new, key)
+ logs.append(f"新增: {'.'.join(path + [str(key)])} 注释: {comment if comment else '无'}")
+ elif isinstance(new[key], (dict, Table)) and isinstance(old.get(key), (dict, Table)):
+ compare_dicts(new[key], old[key], path + [str(key)], new_comments, old_comments, logs)
+ # 删减项
+ for key in old:
+ if key == "version":
+ continue
+ if key not in new:
+ comment = get_key_comment(old, key)
+ logs.append(f"删减: {'.'.join(path + [str(key)])} 注释: {comment if comment else '无'}")
+ return logs
+
+
def update_config():
print("开始更新配置文件...")
# 获取根目录路径
@@ -45,16 +90,26 @@ def update_config():
# 检查version是否相同
if old_config and "inner" in old_config and "inner" in new_config:
- old_version = old_config["inner"].get("version")
- new_version = new_config["inner"].get("version")
+ old_version = old_config["inner"].get("version") # type: ignore
+ new_version = new_config["inner"].get("version") # type: ignore
if old_version and new_version and old_version == new_version:
print(f"检测到版本号相同 (v{old_version}),跳过更新")
# 如果version相同,恢复旧配置文件并返回
- shutil.move(old_backup_path, old_config_path)
+ shutil.move(old_backup_path, old_config_path) # type: ignore
return
else:
print(f"检测到版本号不同: 旧版本 v{old_version} -> 新版本 v{new_version}")
+ # 输出新增和删减项及注释
+ if old_config:
+ print("配置项变动如下:")
+ logs = compare_dicts(new_config, old_config)
+ if logs:
+ for log in logs:
+ print(log)
+ else:
+ print("无新增或删减项")
+
# 递归更新配置
def update_dict(target, source):
for key, value in source.items():
@@ -62,7 +117,7 @@ def update_config():
if key == "version":
continue
if key in target:
- if isinstance(value, dict) and isinstance(target[key], (dict, tomlkit.items.Table)):
+ if isinstance(value, dict) and isinstance(target[key], (dict, Table)):
update_dict(target[key], value)
else:
try:
@@ -85,10 +140,7 @@ def update_config():
if value and isinstance(value[0], dict) and "regex" in value[0]:
contains_regex = True
- if contains_regex:
- target[key] = value
- else:
- target[key] = tomlkit.array(value)
+    target[key] = value if contains_regex else tomlkit.item(value)
else:
# 其他类型使用item方法创建新值
target[key] = tomlkit.item(value)
diff --git a/src/config/config.py b/src/config/config.py
index 33561c48..fae2ea2a 100644
--- a/src/config/config.py
+++ b/src/config/config.py
@@ -1,25 +1,21 @@
import os
-from dataclasses import field, dataclass
-
import tomlkit
import shutil
+
from datetime import datetime
-
from tomlkit import TOMLDocument
-from tomlkit.items import Table
-
-from src.common.logger import get_logger
+from tomlkit.items import Table, KeyType
+from dataclasses import field, dataclass
from rich.traceback import install
+from src.common.logger import get_logger
from src.config.config_base import ConfigBase
from src.config.official_configs import (
BotConfig,
PersonalityConfig,
- IdentityConfig,
ExpressionConfig,
ChatConfig,
NormalChatConfig,
- FocusChatConfig,
EmojiConfig,
MemoryConfig,
MoodConfig,
@@ -35,6 +31,9 @@ from src.config.official_configs import (
LPMMKnowledgeConfig,
RelationshipConfig,
ToolConfig,
+ VoiceConfig,
+ DebugConfig,
+ CustomPromptConfig,
)
install(extra_lines=3)
@@ -50,17 +49,167 @@ TEMPLATE_DIR = os.path.join(PROJECT_ROOT, "template")
# 考虑到,实际上配置文件中的mai_version是不会自动更新的,所以采用硬编码
# 对该字段的更新,请严格参照语义化版本规范:https://semver.org/lang/zh-CN/
-MMC_VERSION = "0.8.1"
+MMC_VERSION = "0.9.0"
+
+
+def get_key_comment(toml_table, key):
+ # 获取key的注释(如果有)
+ if hasattr(toml_table, "trivia") and hasattr(toml_table.trivia, "comment"):
+ return toml_table.trivia.comment
+ if hasattr(toml_table, "value") and isinstance(toml_table.value, dict):
+ item = toml_table.value.get(key)
+ if item is not None and hasattr(item, "trivia"):
+ return item.trivia.comment
+ if hasattr(toml_table, "keys"):
+ for k in toml_table.keys():
+ if isinstance(k, KeyType) and k.key == key:
+ return k.trivia.comment
+ return None
+
+
+def compare_dicts(new, old, path=None, logs=None):
+ # 递归比较两个dict,找出新增和删减项,收集注释
+ if path is None:
+ path = []
+ if logs is None:
+ logs = []
+ # 新增项
+ for key in new:
+ if key == "version":
+ continue
+ if key not in old:
+ comment = get_key_comment(new, key)
+ logs.append(f"新增: {'.'.join(path + [str(key)])} 注释: {comment if comment else '无'}")
+ elif isinstance(new[key], (dict, Table)) and isinstance(old.get(key), (dict, Table)):
+ compare_dicts(new[key], old[key], path + [str(key)], logs)
+ # 删减项
+ for key in old:
+ if key == "version":
+ continue
+ if key not in new:
+ comment = get_key_comment(old, key)
+ logs.append(f"删减: {'.'.join(path + [str(key)])} 注释: {comment if comment else '无'}")
+ return logs
+
+
+def get_value_by_path(d, path):
+ for k in path:
+ if isinstance(d, dict) and k in d:
+ d = d[k]
+ else:
+ return None
+ return d
+
+
+def set_value_by_path(d, path, value):
+ for k in path[:-1]:
+ if k not in d or not isinstance(d[k], dict):
+ d[k] = {}
+ d = d[k]
+ d[path[-1]] = value
+
+
+def compare_default_values(new, old, path=None, logs=None, changes=None):
+ # 递归比较两个dict,找出默认值变化项
+ if path is None:
+ path = []
+ if logs is None:
+ logs = []
+ if changes is None:
+ changes = []
+ for key in new:
+ if key == "version":
+ continue
+ if key in old:
+ if isinstance(new[key], (dict, Table)) and isinstance(old[key], (dict, Table)):
+ compare_default_values(new[key], old[key], path + [str(key)], logs, changes)
+ else:
+ # 只要值发生变化就记录
+ if new[key] != old[key]:
+ logs.append(
+ f"默认值变化: {'.'.join(path + [str(key)])} 旧默认值: {old[key]} 新默认值: {new[key]}"
+ )
+ changes.append((path + [str(key)], old[key], new[key]))
+ return logs, changes
def update_config():
# 获取根目录路径
old_config_dir = os.path.join(CONFIG_DIR, "old")
+ compare_dir = os.path.join(TEMPLATE_DIR, "compare")
# 定义文件路径
template_path = os.path.join(TEMPLATE_DIR, "bot_config_template.toml")
old_config_path = os.path.join(CONFIG_DIR, "bot_config.toml")
new_config_path = os.path.join(CONFIG_DIR, "bot_config.toml")
+ compare_path = os.path.join(compare_dir, "bot_config_template.toml")
+
+ # 创建compare目录(如果不存在)
+ os.makedirs(compare_dir, exist_ok=True)
+
+ # 处理compare下的模板文件
+ def get_version_from_toml(toml_path):
+ if not os.path.exists(toml_path):
+ return None
+ with open(toml_path, "r", encoding="utf-8") as f:
+ doc = tomlkit.load(f)
+ if "inner" in doc and "version" in doc["inner"]: # type: ignore
+ return doc["inner"]["version"] # type: ignore
+ return None
+
+ template_version = get_version_from_toml(template_path)
+ compare_version = get_version_from_toml(compare_path)
+
+ def version_tuple(v):
+ if v is None:
+ return (0,)
+ return tuple(int(x) if x.isdigit() else 0 for x in str(v).replace("v", "").split("-")[0].split("."))
+
+ # 先读取 compare 下的模板(如果有),用于默认值变动检测
+ if os.path.exists(compare_path):
+ with open(compare_path, "r", encoding="utf-8") as f:
+ compare_config = tomlkit.load(f)
+ else:
+ compare_config = None
+
+ # 读取当前模板
+ with open(template_path, "r", encoding="utf-8") as f:
+ new_config = tomlkit.load(f)
+
+ # 检查默认值变化并处理(只有 compare_config 存在时才做)
+ if compare_config is not None:
+ # 读取旧配置
+ with open(old_config_path, "r", encoding="utf-8") as f:
+ old_config = tomlkit.load(f)
+ logs, changes = compare_default_values(new_config, compare_config)
+ if logs:
+ logger.info("检测到模板默认值变动如下:")
+ for log in logs:
+ logger.info(log)
+ # 检查旧配置是否等于旧默认值,如果是则更新为新默认值
+ for path, old_default, new_default in changes:
+ old_value = get_value_by_path(old_config, path)
+ if old_value == old_default:
+ set_value_by_path(old_config, path, new_default)
+ logger.info(
+ f"已自动将配置 {'.'.join(path)} 的值从旧默认值 {old_default} 更新为新默认值 {new_default}"
+ )
+ else:
+ logger.info("未检测到模板默认值变动")
+ # 保存旧配置的变更(后续合并逻辑会用到 old_config)
+ else:
+ old_config = None
+
+ # 检查 compare 下没有模板,或新模板版本更高,则复制
+ if not os.path.exists(compare_path):
+ shutil.copy2(template_path, compare_path)
+ logger.info(f"已将模板文件复制到: {compare_path}")
+ else:
+ if version_tuple(template_version) > version_tuple(compare_version):
+ shutil.copy2(template_path, compare_path)
+ logger.info(f"模板版本较新,已替换compare下的模板: {compare_path}")
+ else:
+ logger.debug(f"compare下的模板版本不低于当前模板,无需替换: {compare_path}")
# 检查配置文件是否存在
if not os.path.exists(old_config_path):
@@ -71,21 +220,25 @@ def update_config():
# 如果是新创建的配置文件,直接返回
quit()
- # 读取旧配置文件和模板文件
- with open(old_config_path, "r", encoding="utf-8") as f:
- old_config = tomlkit.load(f)
- with open(template_path, "r", encoding="utf-8") as f:
- new_config = tomlkit.load(f)
+ # 读取旧配置文件和模板文件(如果前面没读过 old_config,这里再读一次)
+ if old_config is None:
+ with open(old_config_path, "r", encoding="utf-8") as f:
+ old_config = tomlkit.load(f)
+ # new_config 已经读取
+
+ # 读取 compare_config 只用于默认值变动检测,后续合并逻辑不再用
# 检查version是否相同
if old_config and "inner" in old_config and "inner" in new_config:
- old_version = old_config["inner"].get("version")
- new_version = new_config["inner"].get("version")
+ old_version = old_config["inner"].get("version") # type: ignore
+ new_version = new_config["inner"].get("version") # type: ignore
if old_version and new_version and old_version == new_version:
logger.info(f"检测到配置文件版本号相同 (v{old_version}),跳过更新")
return
else:
- logger.info(f"检测到版本号不同: 旧版本 v{old_version} -> 新版本 v{new_version}")
+ logger.info(
+ f"\n----------------------------------------\n检测到版本号不同: 旧版本 v{old_version} -> 新版本 v{new_version}\n----------------------------------------"
+ )
else:
logger.info("已有配置文件未检测到版本号,可能是旧版本。将进行更新")
@@ -102,7 +255,17 @@ def update_config():
shutil.copy2(template_path, new_config_path)
logger.info(f"已创建新配置文件: {new_config_path}")
- def update_dict(target: TOMLDocument | dict, source: TOMLDocument | dict):
+ # 输出新增和删减项及注释
+ if old_config:
+ logger.info("配置项变动如下:\n----------------------------------------")
+ logs = compare_dicts(new_config, old_config)
+ if logs:
+ for log in logs:
+ logger.info(log)
+ else:
+ logger.info("无新增或删减项")
+
+ def update_dict(target: TOMLDocument | dict | Table, source: TOMLDocument | dict):
"""
将source字典的值更新到target字典中(如果target中存在相同的键)
"""
@@ -111,8 +274,9 @@ def update_config():
if key == "version":
continue
if key in target:
- if isinstance(value, dict) and isinstance(target[key], (dict, Table)):
- update_dict(target[key], value)
+ target_value = target[key]
+ if isinstance(value, dict) and isinstance(target_value, (dict, Table)):
+ update_dict(target_value, value)
else:
try:
# 对数组类型进行特殊处理
@@ -145,12 +309,10 @@ class Config(ConfigBase):
bot: BotConfig
personality: PersonalityConfig
- identity: IdentityConfig
relationship: RelationshipConfig
chat: ChatConfig
message_receive: MessageReceiveConfig
normal_chat: NormalChatConfig
- focus_chat: FocusChatConfig
emoji: EmojiConfig
expression: ExpressionConfig
memory: MemoryConfig
@@ -165,7 +327,9 @@ class Config(ConfigBase):
maim_message: MaimMessageConfig
lpmm_knowledge: LPMMKnowledgeConfig
tool: ToolConfig
-
+ debug: DebugConfig
+ custom_prompt: CustomPromptConfig
+ voice: VoiceConfig
def load_config(config_path: str) -> Config:
"""
diff --git a/src/config/config_base.py b/src/config/config_base.py
index 6c414f0b..5fb39819 100644
--- a/src/config/config_base.py
+++ b/src/config/config_base.py
@@ -43,7 +43,7 @@ class ConfigBase:
field_type = f.type
try:
- init_args[field_name] = cls._convert_field(value, field_type)
+ init_args[field_name] = cls._convert_field(value, field_type) # type: ignore
except TypeError as e:
raise TypeError(f"Field '{field_name}' has a type error: {e}") from e
except Exception as e:
@@ -94,7 +94,7 @@ class ConfigBase:
raise TypeError(
f"Expected {len(field_type_args)} items for {field_type.__name__}, got {len(value)}"
)
- return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args))
+ return tuple(cls._convert_field(item, arg) for item, arg in zip(value, field_type_args, strict=False))
if field_origin_type is dict:
# 检查提供的value是否为dict
diff --git a/src/config/official_configs.py b/src/config/official_configs.py
index 7dc63089..1a14b47c 100644
--- a/src/config/official_configs.py
+++ b/src/config/official_configs.py
@@ -1,7 +1,8 @@
-from dataclasses import dataclass, field
-from typing import Any, Literal
import re
+from dataclasses import dataclass, field
+from typing import Any, Literal, Optional
+
from src.config.config_base import ConfigBase
"""
@@ -16,6 +17,9 @@ from src.config.config_base import ConfigBase
@dataclass
class BotConfig(ConfigBase):
"""QQ机器人配置类"""
+
+ platform: str
+ """平台"""
qq_account: str
"""QQ账号"""
@@ -34,21 +38,16 @@ class PersonalityConfig(ConfigBase):
personality_core: str
"""核心人格"""
- personality_sides: list[str] = field(default_factory=lambda: [])
+ personality_side: str
"""人格侧写"""
+ identity: str = ""
+ """身份特征"""
+
compress_personality: bool = True
"""是否压缩人格,压缩后会精简人格信息,节省token消耗并提高回复性能,但是会丢失一些信息,如果人设不长,可以关闭"""
-
-@dataclass
-class IdentityConfig(ConfigBase):
- """个体特征配置类"""
-
- identity_detail: list[str] = field(default_factory=lambda: [])
- """身份特征"""
-
- compress_indentity: bool = True
+ compress_identity: bool = True
"""是否压缩身份,压缩后会精简身份信息,节省token消耗并提高回复性能,但是会丢失一些信息,如果不长,可以关闭"""
@@ -57,24 +56,16 @@ class RelationshipConfig(ConfigBase):
"""关系配置类"""
enable_relationship: bool = True
-
- give_name: bool = False
- """是否给其他人取名"""
-
- build_relationship_interval: int = 600
- """构建关系间隔 单位秒,如果为0则不构建关系"""
+ """是否启用关系系统"""
relation_frequency: int = 1
- """关系频率,麦麦构建关系的速度,仅在normal_chat模式下有效"""
+ """关系频率,麦麦构建关系的速度"""
@dataclass
class ChatConfig(ConfigBase):
"""聊天配置类"""
- chat_mode: str = "normal"
- """聊天模式"""
-
max_context_size: int = 18
"""上下文长度"""
@@ -84,9 +75,15 @@ class ChatConfig(ConfigBase):
选择普通模型的概率为 1 - reasoning_normal_model_probability
"""
+ thinking_timeout: int = 30
+ """麦麦最长思考规划时间,超过这个时间的思考会放弃(往往是api反应太慢)"""
+
talk_frequency: float = 1
"""回复频率阈值"""
+ use_s4u_prompt_mode: bool = False
+ """是否使用 s4u 对话构建模式,该模式会分开处理当前对话对象和其他所有对话的内容进行 prompt 构建"""
+
# 修改:基于时段的回复频率配置,改为数组格式
time_based_talk_frequency: list[str] = field(default_factory=lambda: [])
"""
@@ -109,13 +106,10 @@ class ChatConfig(ConfigBase):
表示从该时间开始使用该频率,直到下一个时间点
"""
- auto_focus_threshold: float = 1.0
- """自动切换到专注聊天的阈值,越低越容易进入专注聊天"""
+ focus_value: float = 1.0
+ """麦麦的专注思考能力,越低越容易专注,消耗token也越多"""
- exit_focus_threshold: float = 1.0
- """自动退出专注聊天的阈值,越低越容易退出专注聊天"""
-
- def get_current_talk_frequency(self, chat_stream_id: str = None) -> float:
+ def get_current_talk_frequency(self, chat_stream_id: Optional[str] = None) -> float:
"""
根据当前时间和聊天流获取对应的 talk_frequency
@@ -140,7 +134,7 @@ class ChatConfig(ConfigBase):
# 如果都没有匹配,返回默认值
return self.talk_frequency
- def _get_time_based_frequency(self, time_freq_list: list[str]) -> float:
+ def _get_time_based_frequency(self, time_freq_list: list[str]) -> Optional[float]:
"""
根据时间配置列表获取当前时段的频率
@@ -188,7 +182,7 @@ class ChatConfig(ConfigBase):
return current_frequency
- def _get_stream_specific_frequency(self, chat_stream_id: str) -> float:
+ def _get_stream_specific_frequency(self, chat_stream_id: str):
"""
获取特定聊天流在当前时间的频率
@@ -219,7 +213,7 @@ class ChatConfig(ConfigBase):
return None
- def _parse_stream_config_to_chat_id(self, stream_config_str: str) -> str:
+ def _parse_stream_config_to_chat_id(self, stream_config_str: str) -> Optional[str]:
"""
解析流配置字符串并生成对应的 chat_id
@@ -270,15 +264,6 @@ class MessageReceiveConfig(ConfigBase):
class NormalChatConfig(ConfigBase):
"""普通聊天配置类"""
- message_buffer: bool = False
- """消息缓冲器"""
-
- emoji_chance: float = 0.2
- """发送表情包的基础概率"""
-
- thinking_timeout: int = 120
- """最长思考时间"""
-
willing_mode: str = "classical"
"""意愿模式"""
@@ -291,38 +276,6 @@ class NormalChatConfig(ConfigBase):
at_bot_inevitable_reply: bool = False
"""@bot 必然回复"""
- enable_planner: bool = False
- """是否启用动作规划器"""
-
- gather_timeout: int = 110 # planner和generator的并行执行超时时间
- """planner和generator的并行执行超时时间"""
-
- auto_focus_threshold: float = 1.0 # 自动切换到专注模式的阈值,值越大越难触发
- """自动切换到专注模式的阈值,值越大越难触发"""
-
- fatigue_talk_frequency: float = 0.2 # 疲劳模式下的基础对话频率 (条/分钟)
- """疲劳模式下的基础对话频率 (条/分钟)"""
-
-
-@dataclass
-class FocusChatConfig(ConfigBase):
- """专注聊天配置类"""
-
- compressed_length: int = 5
- """心流上下文压缩的最短压缩长度,超过心流观察到的上下文长度,会压缩,最短压缩长度为5"""
-
- compress_length_limit: int = 5
- """最多压缩份数,超过该数值的压缩上下文会被删除"""
-
- think_interval: float = 1
- """思考间隔(秒)"""
-
- consecutive_replies: float = 1
- """连续回复能力,值越高,麦麦连续回复的概率越高"""
-
- working_memory_processor: bool = False
- """是否启用工作记忆处理器"""
-
@dataclass
class ExpressionConfig(ConfigBase):
@@ -356,12 +309,25 @@ class ToolConfig(ConfigBase):
enable_in_focus_chat: bool = True
"""是否在专注聊天中启用工具"""
+
+@dataclass
+class VoiceConfig(ConfigBase):
+ """语音识别配置类"""
+
+ enable_asr: bool = False
+ """是否启用语音识别"""
@dataclass
class EmojiConfig(ConfigBase):
"""表情包配置类"""
+ emoji_chance: float = 0.6
+ """发送表情包的基础概率"""
+
+ emoji_activate_type: str = "random"
+ """表情包激活类型,可选:random,llm,random下,表情包动作随机启用,llm下,表情包动作根据llm判断是否启用"""
+
max_reg_num: int = 200
"""表情包最大注册数量"""
@@ -429,6 +395,9 @@ class MemoryConfig(ConfigBase):
memory_ban_words: list[str] = field(default_factory=lambda: ["表情包", "图片", "回复", "聊天记录"])
"""不允许记忆的词列表"""
+
+ enable_instant_memory: bool = True
+ """是否启用即时记忆"""
@dataclass
@@ -494,6 +463,13 @@ class KeywordReactionConfig(ConfigBase):
if not isinstance(rule, KeywordRuleConfig):
raise ValueError(f"规则必须是KeywordRuleConfig类型,而不是{type(rule).__name__}")
+@dataclass
+class CustomPromptConfig(ConfigBase):
+ """自定义提示词配置类"""
+
+ image_prompt: str = ""
+ """图片提示词"""
+
@dataclass
class ResponsePostProcessConfig(ConfigBase):
@@ -548,13 +524,18 @@ class TelemetryConfig(ConfigBase):
"""是否启用遥测"""
+@dataclass
+class DebugConfig(ConfigBase):
+ """调试配置类"""
+
+ show_prompt: bool = False
+ """是否显示prompt"""
+
+
@dataclass
class ExperimentalConfig(ConfigBase):
"""实验功能配置类"""
- debug_show_chat_mode: bool = False
- """是否在回复后显示当前聊天模式"""
-
enable_friend_chat: bool = False
"""是否启用好友聊天"""
@@ -628,6 +609,9 @@ class LPMMKnowledgeConfig(ConfigBase):
qa_res_top_k: int = 10
"""QA最终结果的Top K数量"""
+ embedding_dimension: int = 1024
+ """嵌入向量维度,应该与模型的输出维度一致"""
+
@dataclass
class ModelConfig(ConfigBase):
@@ -647,14 +631,17 @@ class ModelConfig(ConfigBase):
replyer_2: dict[str, Any] = field(default_factory=lambda: {})
"""normal_chat次要回复模型配置"""
- memory_summary: dict[str, Any] = field(default_factory=lambda: {})
- """记忆的概括模型配置"""
+ memory: dict[str, Any] = field(default_factory=lambda: {})
+ """记忆模型配置"""
+
+ emotion: dict[str, Any] = field(default_factory=lambda: {})
+ """情绪模型配置"""
vlm: dict[str, Any] = field(default_factory=lambda: {})
"""视觉语言模型配置"""
- focus_working_memory: dict[str, Any] = field(default_factory=lambda: {})
- """专注工作记忆模型配置"""
+ voice: dict[str, Any] = field(default_factory=lambda: {})
+ """语音识别模型配置"""
tool_use: dict[str, Any] = field(default_factory=lambda: {})
"""专注工具使用模型配置"""
@@ -662,17 +649,14 @@ class ModelConfig(ConfigBase):
planner: dict[str, Any] = field(default_factory=lambda: {})
"""规划模型配置"""
- relation: dict[str, Any] = field(default_factory=lambda: {})
- """关系模型配置"""
-
embedding: dict[str, Any] = field(default_factory=lambda: {})
"""嵌入模型配置"""
- pfc_action_planner: dict[str, Any] = field(default_factory=lambda: {})
- """PFC动作规划模型配置"""
+ lpmm_entity_extract: dict[str, Any] = field(default_factory=lambda: {})
+ """LPMM实体提取模型配置"""
- pfc_chat: dict[str, Any] = field(default_factory=lambda: {})
- """PFC聊天模型配置"""
+ lpmm_rdf_build: dict[str, Any] = field(default_factory=lambda: {})
+ """LPMM RDF构建模型配置"""
- pfc_reply_checker: dict[str, Any] = field(default_factory=lambda: {})
- """PFC回复检查模型配置"""
+ lpmm_qa: dict[str, Any] = field(default_factory=lambda: {})
+ """LPMM问答模型配置"""
diff --git a/src/experimental/PFC/action_planner.py b/src/experimental/PFC/action_planner.py
deleted file mode 100644
index e7045f2a..00000000
--- a/src/experimental/PFC/action_planner.py
+++ /dev/null
@@ -1,490 +0,0 @@
-import time
-from typing import Tuple, Optional # 增加了 Optional
-from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.experimental.PFC.chat_observer import ChatObserver
-from src.experimental.PFC.pfc_utils import get_items_from_json
-from src.individuality.individuality import get_individuality
-from src.experimental.PFC.observation_info import ObservationInfo
-from src.experimental.PFC.conversation_info import ConversationInfo
-from src.chat.utils.chat_message_builder import build_readable_messages
-
-
-logger = get_logger("pfc_action_planner")
-
-
-# --- 定义 Prompt 模板 ---
-
-# Prompt(1): 首次回复或非连续回复时的决策 Prompt
-PROMPT_INITIAL_REPLY = """{persona_text}。现在你在参与一场QQ私聊,请根据以下【所有信息】审慎且灵活的决策下一步行动,可以回复,可以倾听,可以调取知识,甚至可以屏蔽对方:
-
-【当前对话目标】
-{goals_str}
-{knowledge_info_str}
-
-【最近行动历史概要】
-{action_history_summary}
-【上一次行动的详细情况和结果】
-{last_action_context}
-【时间和超时提示】
-{time_since_last_bot_message_info}{timeout_context}
-【最近的对话记录】(包括你已成功发送的消息 和 新收到的消息)
-{chat_history_text}
-
-------
-可选行动类型以及解释:
-fetch_knowledge: 需要调取知识或记忆,当需要专业知识或特定信息时选择,对方若提到你不太认识的人名或实体也可以尝试选择
-listening: 倾听对方发言,当你认为对方话才说到一半,发言明显未结束时选择
-direct_reply: 直接回复对方
-rethink_goal: 思考一个对话目标,当你觉得目前对话需要目标,或当前目标不再适用,或话题卡住时选择。注意私聊的环境是灵活的,有可能需要经常选择
-end_conversation: 结束对话,对方长时间没回复或者当你觉得对话告一段落时可以选择
-block_and_ignore: 更加极端的结束对话方式,直接结束对话并在一段时间内无视对方所有发言(屏蔽),当对话让你感到十分不适,或你遭到各类骚扰时选择
-
-请以JSON格式输出你的决策:
-{{
- "action": "选择的行动类型 (必须是上面列表中的一个)",
- "reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”和自身设定人设做出合理判断的)"
-}}
-
-注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
-
-# Prompt(2): 上一次成功回复后,决定继续发言时的决策 Prompt
-PROMPT_FOLLOW_UP = """{persona_text}。现在你在参与一场QQ私聊,刚刚你已经回复了对方,请根据以下【所有信息】审慎且灵活的决策下一步行动,可以继续发送新消息,可以等待,可以倾听,可以调取知识,甚至可以屏蔽对方:
-
-【当前对话目标】
-{goals_str}
-{knowledge_info_str}
-
-【最近行动历史概要】
-{action_history_summary}
-【上一次行动的详细情况和结果】
-{last_action_context}
-【时间和超时提示】
-{time_since_last_bot_message_info}{timeout_context}
-【最近的对话记录】(包括你已成功发送的消息 和 新收到的消息)
-{chat_history_text}
-
-------
-可选行动类型以及解释:
-fetch_knowledge: 需要调取知识,当需要专业知识或特定信息时选择,对方若提到你不太认识的人名或实体也可以尝试选择
-wait: 暂时不说话,留给对方交互空间,等待对方回复(尤其是在你刚发言后、或上次发言因重复、发言过多被拒时、或不确定做什么时,这是不错的选择)
-listening: 倾听对方发言(虽然你刚发过言,但如果对方立刻回复且明显话没说完,可以选择这个)
-send_new_message: 发送一条新消息继续对话,允许适当的追问、补充、深入话题,或开启相关新话题。**但是避免在因重复被拒后立即使用,也不要在对方没有回复的情况下过多的“消息轰炸”或重复发言**
-rethink_goal: 思考一个对话目标,当你觉得目前对话需要目标,或当前目标不再适用,或话题卡住时选择。注意私聊的环境是灵活的,有可能需要经常选择
-end_conversation: 结束对话,对方长时间没回复或者当你觉得对话告一段落时可以选择
-block_and_ignore: 更加极端的结束对话方式,直接结束对话并在一段时间内无视对方所有发言(屏蔽),当对话让你感到十分不适,或你遭到各类骚扰时选择
-
-请以JSON格式输出你的决策:
-{{
- "action": "选择的行动类型 (必须是上面列表中的一个)",
- "reason": "选择该行动的详细原因 (必须有解释你是如何根据“上一次行动结果”、“对话记录”和自身设定人设做出合理判断的。请说明你为什么选择继续发言而不是等待,以及打算发送什么类型的新消息连续发言,必须记录已经发言了几次)"
-}}
-
-注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
-
-# 新增:Prompt(3): 决定是否在结束对话前发送告别语
-PROMPT_END_DECISION = """{persona_text}。刚刚你决定结束一场 QQ 私聊。
-
-【你们之前的聊天记录】
-{chat_history_text}
-
-你觉得你们的对话已经完整结束了吗?有时候,在对话自然结束后再说点什么可能会有点奇怪,但有时也可能需要一条简短的消息来圆满结束。
-如果觉得确实有必要再发一条简短、自然、符合你人设的告别消息(比如 "好,下次再聊~" 或 "嗯,先这样吧"),就输出 "yes"。
-如果觉得当前状态下直接结束对话更好,没有必要再发消息,就输出 "no"。
-
-请以 JSON 格式输出你的选择:
-{{
- "say_bye": "yes/no",
- "reason": "选择 yes 或 no 的原因和内心想法 (简要说明)"
-}}
-
-注意:请严格按照 JSON 格式输出,不要包含任何其他内容。"""
-
-
-# ActionPlanner 类定义,顶格
-class ActionPlanner:
- """行动规划器"""
-
- def __init__(self, stream_id: str, private_name: str):
- self.llm = LLMRequest(
- model=global_config.llm_PFC_action_planner,
- temperature=global_config.llm_PFC_action_planner["temp"],
- request_type="action_planning",
- )
- self.personality_info = get_individuality().get_prompt(x_person=2, level=3)
- self.name = global_config.bot.nickname
- self.private_name = private_name
- self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
- # self.action_planner_info = ActionPlannerInfo() # 移除未使用的变量
-
- # 修改 plan 方法签名,增加 last_successful_reply_action 参数
- async def plan(
- self,
- observation_info: ObservationInfo,
- conversation_info: ConversationInfo,
- last_successful_reply_action: Optional[str],
- ) -> Tuple[str, str]:
- """规划下一步行动
-
- Args:
- observation_info: 决策信息
- conversation_info: 对话信息
- last_successful_reply_action: 上一次成功的回复动作类型 ('direct_reply' 或 'send_new_message' 或 None)
-
- Returns:
- Tuple[str, str]: (行动类型, 行动原因)
- """
- # --- 获取 Bot 上次发言时间信息 ---
- # (这部分逻辑不变)
- time_since_last_bot_message_info = ""
- try:
- bot_id = str(global_config.bot.qq_account)
- if hasattr(observation_info, "chat_history") and observation_info.chat_history:
- for i in range(len(observation_info.chat_history) - 1, -1, -1):
- msg = observation_info.chat_history[i]
- if not isinstance(msg, dict):
- continue
- sender_info = msg.get("user_info", {})
- sender_id = str(sender_info.get("user_id")) if isinstance(sender_info, dict) else None
- msg_time = msg.get("time")
- if sender_id == bot_id and msg_time:
- time_diff = time.time() - msg_time
- if time_diff < 60.0:
- time_since_last_bot_message_info = (
- f"提示:你上一条成功发送的消息是在 {time_diff:.1f} 秒前。\n"
- )
- break
- else:
- logger.debug(
- f"[私聊][{self.private_name}]Observation info chat history is empty or not available for bot time check."
- )
- except AttributeError:
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo object might not have chat_history attribute yet for bot time check."
- )
- except Exception as e:
- logger.warning(f"[私聊][{self.private_name}]获取 Bot 上次发言时间时出错: {e}")
-
- # --- 获取超时提示信息 ---
- # (这部分逻辑不变)
- timeout_context = ""
- try:
- if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
- last_goal_dict = conversation_info.goal_list[-1]
- if isinstance(last_goal_dict, dict) and "goal" in last_goal_dict:
- last_goal_text = last_goal_dict["goal"]
- if isinstance(last_goal_text, str) and "分钟,思考接下来要做什么" in last_goal_text:
- try:
- timeout_minutes_text = last_goal_text.split(",")[0].replace("你等待了", "")
- timeout_context = f"重要提示:对方已经长时间({timeout_minutes_text})没有回复你的消息了(这可能代表对方繁忙/不想回复/没注意到你的消息等情况,或在对方看来本次聊天已告一段落),请基于此情况规划下一步。\n"
- except Exception:
- timeout_context = "重要提示:对方已经长时间没有回复你的消息了(这可能代表对方繁忙/不想回复/没注意到你的消息等情况,或在对方看来本次聊天已告一段落),请基于此情况规划下一步。\n"
- else:
- logger.debug(
- f"[私聊][{self.private_name}]Conversation info goal_list is empty or not available for timeout check."
- )
- except AttributeError:
- logger.warning(
- f"[私聊][{self.private_name}]ConversationInfo object might not have goal_list attribute yet for timeout check."
- )
- except Exception as e:
- logger.warning(f"[私聊][{self.private_name}]检查超时目标时出错: {e}")
-
- # --- 构建通用 Prompt 参数 ---
- logger.debug(
- f"[私聊][{self.private_name}]开始规划行动:当前目标: {getattr(conversation_info, 'goal_list', '不可用')}"
- )
-
- # 构建对话目标 (goals_str)
- goals_str = ""
- try:
- if hasattr(conversation_info, "goal_list") and conversation_info.goal_list:
- for goal_reason in conversation_info.goal_list:
- if isinstance(goal_reason, dict):
- goal = goal_reason.get("goal", "目标内容缺失")
- reasoning = goal_reason.get("reasoning", "没有明确原因")
- else:
- goal = str(goal_reason)
- reasoning = "没有明确原因"
-
- goal = str(goal) if goal is not None else "目标内容缺失"
- reasoning = str(reasoning) if reasoning is not None else "没有明确原因"
- goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
-
- if not goals_str:
- goals_str = "- 目前没有明确对话目标,请考虑设定一个。\n"
- else:
- goals_str = "- 目前没有明确对话目标,请考虑设定一个。\n"
- except AttributeError:
- logger.warning(
- f"[私聊][{self.private_name}]ConversationInfo object might not have goal_list attribute yet."
- )
- goals_str = "- 获取对话目标时出错。\n"
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]构建对话目标字符串时出错: {e}")
- goals_str = "- 构建对话目标时出错。\n"
-
- # --- 知识信息字符串构建开始 ---
- knowledge_info_str = "【已获取的相关知识和记忆】\n"
- try:
- # 检查 conversation_info 是否有 knowledge_list 并且不为空
- if hasattr(conversation_info, "knowledge_list") and conversation_info.knowledge_list:
- # 最多只显示最近的 5 条知识,防止 Prompt 过长
- recent_knowledge = conversation_info.knowledge_list[-5:]
- for i, knowledge_item in enumerate(recent_knowledge):
- if isinstance(knowledge_item, dict):
- query = knowledge_item.get("query", "未知查询")
- knowledge = knowledge_item.get("knowledge", "无知识内容")
- source = knowledge_item.get("source", "未知来源")
- # 只取知识内容的前 2000 个字,避免太长
- knowledge_snippet = knowledge[:2000] + "..." if len(knowledge) > 2000 else knowledge
- knowledge_info_str += (
- f"{i + 1}. 关于 '{query}' 的知识 (来源: {source}):\n {knowledge_snippet}\n"
- )
- else:
- # 处理列表里不是字典的异常情况
- knowledge_info_str += f"{i + 1}. 发现一条格式不正确的知识记录。\n"
-
- if not recent_knowledge: # 如果 knowledge_list 存在但为空
- knowledge_info_str += "- 暂无相关知识和记忆。\n"
-
- else:
- # 如果 conversation_info 没有 knowledge_list 属性,或者列表为空
- knowledge_info_str += "- 暂无相关知识记忆。\n"
- except AttributeError:
- logger.warning(f"[私聊][{self.private_name}]ConversationInfo 对象可能缺少 knowledge_list 属性。")
- knowledge_info_str += "- 获取知识列表时出错。\n"
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]构建知识信息字符串时出错: {e}")
- knowledge_info_str += "- 处理知识列表时出错。\n"
- # --- 知识信息字符串构建结束 ---
-
- # 获取聊天历史记录 (chat_history_text)
- try:
- if hasattr(observation_info, "chat_history") and observation_info.chat_history:
- chat_history_text = observation_info.chat_history_str
- if not chat_history_text:
- chat_history_text = "还没有聊天记录。\n"
- else:
- chat_history_text = "还没有聊天记录。\n"
-
- if hasattr(observation_info, "new_messages_count") and observation_info.new_messages_count > 0:
- if hasattr(observation_info, "unprocessed_messages") and observation_info.unprocessed_messages:
- new_messages_list = observation_info.unprocessed_messages
- new_messages_str = build_readable_messages(
- new_messages_list,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
- chat_history_text += (
- f"\n--- 以下是 {observation_info.new_messages_count} 条新消息 ---\n{new_messages_str}"
- )
- else:
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo has new_messages_count > 0 but unprocessed_messages is empty or missing."
- )
- except AttributeError:
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo object might be missing expected attributes for chat history."
- )
- chat_history_text = "获取聊天记录时出错。\n"
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]处理聊天记录时发生未知错误: {e}")
- chat_history_text = "处理聊天记录时出错。\n"
-
- # 构建 Persona 文本 (persona_text)
- persona_text = f"你的名字是{self.name},{self.personality_info}。"
-
- # 构建行动历史和上一次行动结果 (action_history_summary, last_action_context)
- # (这部分逻辑不变)
- action_history_summary = "你最近执行的行动历史:\n"
- last_action_context = "关于你【上一次尝试】的行动:\n"
- action_history_list = []
- try:
- if hasattr(conversation_info, "done_action") and conversation_info.done_action:
- action_history_list = conversation_info.done_action[-5:]
- else:
- logger.debug(f"[私聊][{self.private_name}]Conversation info done_action is empty or not available.")
- except AttributeError:
- logger.warning(
- f"[私聊][{self.private_name}]ConversationInfo object might not have done_action attribute yet."
- )
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]访问行动历史时出错: {e}")
-
- if not action_history_list:
- action_history_summary += "- 还没有执行过行动。\n"
- last_action_context += "- 这是你规划的第一个行动。\n"
- else:
- for i, action_data in enumerate(action_history_list):
- action_type = "未知"
- plan_reason = "未知"
- status = "未知"
- final_reason = ""
- action_time = ""
-
- if isinstance(action_data, dict):
- action_type = action_data.get("action", "未知")
- plan_reason = action_data.get("plan_reason", "未知规划原因")
- status = action_data.get("status", "未知")
- final_reason = action_data.get("final_reason", "")
- action_time = action_data.get("time", "")
- elif isinstance(action_data, tuple):
- # 假设旧格式兼容
- if len(action_data) > 0:
- action_type = action_data[0]
- if len(action_data) > 1:
- plan_reason = action_data[1] # 可能是规划原因或最终原因
- if len(action_data) > 2:
- status = action_data[2]
- if status == "recall" and len(action_data) > 3:
- final_reason = action_data[3]
- elif status == "done" and action_type in ["direct_reply", "send_new_message"]:
- plan_reason = "成功发送" # 简化显示
-
- reason_text = f", 失败/取消原因: {final_reason}" if final_reason else ""
- summary_line = f"- 时间:{action_time}, 尝试行动:'{action_type}', 状态:{status}{reason_text}"
- action_history_summary += summary_line + "\n"
-
- if i == len(action_history_list) - 1:
- last_action_context += f"- 上次【规划】的行动是: '{action_type}'\n"
- last_action_context += f"- 当时规划的【原因】是: {plan_reason}\n"
- if status == "done":
- last_action_context += "- 该行动已【成功执行】。\n"
- # 记录这次成功的行动类型,供下次决策
- # self.last_successful_action_type = action_type # 不在这里记录,由 conversation 控制
- elif status == "recall":
- last_action_context += "- 但该行动最终【未能执行/被取消】。\n"
- if final_reason:
- last_action_context += f"- 【重要】失败/取消的具体原因是: “{final_reason}”\n"
- else:
- last_action_context += "- 【重要】失败/取消原因未明确记录。\n"
- # self.last_successful_action_type = None # 行动失败,清除记录
- else:
- last_action_context += f"- 该行动当前状态: {status}\n"
- # self.last_successful_action_type = None # 非完成状态,清除记录
-
- # --- 选择 Prompt ---
- if last_successful_reply_action in ["direct_reply", "send_new_message"]:
- prompt_template = PROMPT_FOLLOW_UP
- logger.debug(f"[私聊][{self.private_name}]使用 PROMPT_FOLLOW_UP (追问决策)")
- else:
- prompt_template = PROMPT_INITIAL_REPLY
- logger.debug(f"[私聊][{self.private_name}]使用 PROMPT_INITIAL_REPLY (首次/非连续回复决策)")
-
- # --- 格式化最终的 Prompt ---
- prompt = prompt_template.format(
- persona_text=persona_text,
- goals_str=goals_str if goals_str.strip() else "- 目前没有明确对话目标,请考虑设定一个。",
- action_history_summary=action_history_summary,
- last_action_context=last_action_context,
- time_since_last_bot_message_info=time_since_last_bot_message_info,
- timeout_context=timeout_context,
- chat_history_text=chat_history_text if chat_history_text.strip() else "还没有聊天记录。",
- knowledge_info_str=knowledge_info_str,
- )
-
- logger.debug(f"[私聊][{self.private_name}]发送到LLM的最终提示词:\n------\n{prompt}\n------")
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"[私聊][{self.private_name}]LLM (行动规划) 原始返回内容: {content}")
-
- # --- 初始行动规划解析 ---
- success, initial_result = get_items_from_json(
- content,
- self.private_name,
- "action",
- "reason",
- default_values={"action": "wait", "reason": "LLM返回格式错误或未提供原因,默认等待"},
- )
-
- initial_action = initial_result.get("action", "wait")
- initial_reason = initial_result.get("reason", "LLM未提供原因,默认等待")
-
- # 检查是否需要进行结束对话决策 ---
- if initial_action == "end_conversation":
- logger.info(f"[私聊][{self.private_name}]初步规划结束对话,进入告别决策...")
-
- # 使用新的 PROMPT_END_DECISION
- end_decision_prompt = PROMPT_END_DECISION.format(
- persona_text=persona_text, # 复用之前的 persona_text
- chat_history_text=chat_history_text, # 复用之前的 chat_history_text
- )
-
- logger.debug(
- f"[私聊][{self.private_name}]发送到LLM的结束决策提示词:\n------\n{end_decision_prompt}\n------"
- )
- try:
- end_content, _ = await self.llm.generate_response_async(end_decision_prompt) # 再次调用LLM
- logger.debug(f"[私聊][{self.private_name}]LLM (结束决策) 原始返回内容: {end_content}")
-
- # 解析结束决策的JSON
- end_success, end_result = get_items_from_json(
- end_content,
- self.private_name,
- "say_bye",
- "reason",
- default_values={"say_bye": "no", "reason": "结束决策LLM返回格式错误,默认不告别"},
- required_types={"say_bye": str, "reason": str}, # 明确类型
- )
-
- say_bye_decision = end_result.get("say_bye", "no").lower() # 转小写方便比较
- end_decision_reason = end_result.get("reason", "未提供原因")
-
- if end_success and say_bye_decision == "yes":
- # 决定要告别,返回新的 'say_goodbye' 动作
- logger.info(
- f"[私聊][{self.private_name}]结束决策: yes, 准备生成告别语. 原因: {end_decision_reason}"
- )
- # 注意:这里的 reason 可以考虑拼接初始原因和结束决策原因,或者只用结束决策原因
- final_action = "say_goodbye"
- final_reason = f"决定发送告别语。决策原因: {end_decision_reason} (原结束理由: {initial_reason})"
- return final_action, final_reason
- else:
- # 决定不告别 (包括解析失败或明确说no)
- logger.info(
- f"[私聊][{self.private_name}]结束决策: no, 直接结束对话. 原因: {end_decision_reason}"
- )
- # 返回原始的 'end_conversation' 动作
- final_action = "end_conversation"
- final_reason = initial_reason # 保持原始的结束理由
- return final_action, final_reason
-
- except Exception as end_e:
- logger.error(f"[私聊][{self.private_name}]调用结束决策LLM或处理结果时出错: {str(end_e)}")
- # 出错时,默认执行原始的结束对话
- logger.warning(f"[私聊][{self.private_name}]结束决策出错,将按原计划执行 end_conversation")
- return "end_conversation", initial_reason # 返回原始动作和原因
-
- else:
- action = initial_action
- reason = initial_reason
-
- # 验证action类型 (保持不变)
- valid_actions = [
- "direct_reply",
- "send_new_message",
- "fetch_knowledge",
- "wait",
- "listening",
- "rethink_goal",
- "end_conversation", # 仍然需要验证,因为可能从上面决策后返回
- "block_and_ignore",
- "say_goodbye", # 也要验证这个新动作
- ]
- if action not in valid_actions:
- logger.warning(f"[私聊][{self.private_name}]LLM返回了未知的行动类型: '{action}',强制改为 wait")
- reason = f"(原始行动'{action}'无效,已强制改为wait) {reason}"
- action = "wait"
-
- logger.info(f"[私聊][{self.private_name}]规划的行动: {action}")
- logger.info(f"[私聊][{self.private_name}]行动原因: {reason}")
- return action, reason
-
- except Exception as e:
- # 外层异常处理保持不变
- logger.error(f"[私聊][{self.private_name}]规划行动时调用 LLM 或处理结果出错: {str(e)}")
- return "wait", f"行动规划处理中发生错误,暂时等待: {str(e)}"
diff --git a/src/experimental/PFC/chat_observer.py b/src/experimental/PFC/chat_observer.py
deleted file mode 100644
index 6021ef73..00000000
--- a/src/experimental/PFC/chat_observer.py
+++ /dev/null
@@ -1,383 +0,0 @@
-import time
-import asyncio
-import traceback
-from typing import Optional, Dict, Any, List
-from src.common.logger import get_logger
-from maim_message import UserInfo
-from src.config.config import global_config
-from src.experimental.PFC.chat_states import (
- NotificationManager,
- create_new_message_notification,
- create_cold_chat_notification,
-)
-from src.experimental.PFC.message_storage import PeeweeMessageStorage
-from rich.traceback import install
-
-install(extra_lines=3)
-
-logger = get_logger("chat_observer")
-
-
-class ChatObserver:
- """聊天状态观察器"""
-
- # 类级别的实例管理
- _instances: Dict[str, "ChatObserver"] = {}
-
- @classmethod
- def get_instance(cls, stream_id: str, private_name: str) -> "ChatObserver":
- """获取或创建观察器实例
-
- Args:
- stream_id: 聊天流ID
- private_name: 私聊名称
-
- Returns:
- ChatObserver: 观察器实例
- """
- if stream_id not in cls._instances:
- cls._instances[stream_id] = cls(stream_id, private_name)
- return cls._instances[stream_id]
-
- def __init__(self, stream_id: str, private_name: str):
- """初始化观察器
-
- Args:
- stream_id: 聊天流ID
- """
- self.last_check_time = None
- self.last_bot_speak_time = None
- self.last_user_speak_time = None
- if stream_id in self._instances:
- raise RuntimeError(f"ChatObserver for {stream_id} already exists. Use get_instance() instead.")
-
- self.stream_id = stream_id
- self.private_name = private_name
- self.message_storage = PeeweeMessageStorage()
-
- # self.last_user_speak_time: Optional[float] = None # 对方上次发言时间
- # self.last_bot_speak_time: Optional[float] = None # 机器人上次发言时间
- # self.last_check_time: float = time.time() # 上次查看聊天记录时间
- self.last_message_read: Optional[Dict[str, Any]] = None # 最后读取的消息ID
- self.last_message_time: float = time.time()
-
- self.waiting_start_time: float = time.time() # 等待开始时间,初始化为当前时间
-
- # 运行状态
- self._running: bool = False
- self._task: Optional[asyncio.Task] = None
- self._update_event = asyncio.Event() # 触发更新的事件
- self._update_complete = asyncio.Event() # 更新完成的事件
-
- # 通知管理器
- self.notification_manager = NotificationManager()
-
- # 冷场检查配置
- self.cold_chat_threshold: float = 60.0 # 60秒无消息判定为冷场
- self.last_cold_chat_check: float = time.time()
- self.is_cold_chat_state: bool = False
-
- self.update_event = asyncio.Event()
- self.update_interval = 2 # 更新间隔(秒)
- self.message_cache = []
- self.update_running = False
-
- async def check(self) -> bool:
- """检查距离上一次观察之后是否有了新消息
-
- Returns:
- bool: 是否有新消息
- """
- logger.debug(f"[私聊][{self.private_name}]检查距离上一次观察之后是否有了新消息: {self.last_check_time}")
-
- new_message_exists = await self.message_storage.has_new_messages(self.stream_id, self.last_check_time)
-
- if new_message_exists:
- logger.debug(f"[私聊][{self.private_name}]发现新消息")
- self.last_check_time = time.time()
-
- return new_message_exists
-
- async def _add_message_to_history(self, message: Dict[str, Any]):
- """添加消息到历史记录并发送通知
-
- Args:
- message: 消息数据
- """
- try:
- # 发送新消息通知
- notification = create_new_message_notification(
- sender="chat_observer", target="observation_info", message=message
- )
- # print(self.notification_manager)
- await self.notification_manager.send_notification(notification)
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]添加消息到历史记录时出错: {e}")
- print(traceback.format_exc())
-
- # 检查并更新冷场状态
- await self._check_cold_chat()
-
- async def _check_cold_chat(self):
- """检查是否处于冷场状态并发送通知"""
- current_time = time.time()
-
- # 每10秒检查一次冷场状态
- if current_time - self.last_cold_chat_check < 10:
- return
-
- self.last_cold_chat_check = current_time
-
- # 判断是否冷场
- is_cold = (
- True
- if self.last_message_time is None
- else (current_time - self.last_message_time) > self.cold_chat_threshold
- )
-
- # 如果冷场状态发生变化,发送通知
- if is_cold != self.is_cold_chat_state:
- self.is_cold_chat_state = is_cold
- notification = create_cold_chat_notification(sender="chat_observer", target="pfc", is_cold=is_cold)
- await self.notification_manager.send_notification(notification)
-
- def new_message_after(self, time_point: float) -> bool:
- """判断是否在指定时间点后有新消息
-
- Args:
- time_point: 时间戳
-
- Returns:
- bool: 是否有新消息
- """
-
- if self.last_message_time is None:
- logger.debug(f"[私聊][{self.private_name}]没有最后消息时间,返回 False")
- return False
-
- has_new = self.last_message_time > time_point
- logger.debug(
- f"[私聊][{self.private_name}]判断是否在指定时间点后有新消息: {self.last_message_time} > {time_point} = {has_new}"
- )
- return has_new
-
- def get_message_history(
- self,
- start_time: Optional[float] = None,
- end_time: Optional[float] = None,
- limit: Optional[int] = None,
- user_id: Optional[str] = None,
- ) -> List[Dict[str, Any]]:
- """获取消息历史
-
- Args:
- start_time: 开始时间戳
- end_time: 结束时间戳
- limit: 限制返回消息数量
- user_id: 指定用户ID
-
- Returns:
- List[Dict[str, Any]]: 消息列表
- """
- filtered_messages = self.message_history
-
- if start_time is not None:
- filtered_messages = [m for m in filtered_messages if m["time"] >= start_time]
-
- if end_time is not None:
- filtered_messages = [m for m in filtered_messages if m["time"] <= end_time]
-
- if user_id is not None:
- filtered_messages = [
- m for m in filtered_messages if UserInfo.from_dict(m.get("user_info", {})).user_id == user_id
- ]
-
- if limit is not None:
- filtered_messages = filtered_messages[-limit:]
-
- return filtered_messages
-
- async def _fetch_new_messages(self) -> List[Dict[str, Any]]:
- """获取新消息
-
- Returns:
- List[Dict[str, Any]]: 新消息列表
- """
- new_messages = await self.message_storage.get_messages_after(self.stream_id, self.last_message_time)
-
- if new_messages:
- self.last_message_read = new_messages[-1]
- self.last_message_time = new_messages[-1]["time"]
-
- # print(f"获取数据库中找到的新消息: {new_messages}")
-
- return new_messages
-
- async def _fetch_new_messages_before(self, time_point: float) -> List[Dict[str, Any]]:
- """获取指定时间点之前的消息
-
- Args:
- time_point: 时间戳
-
- Returns:
- List[Dict[str, Any]]: 最多5条消息
- """
- new_messages = await self.message_storage.get_messages_before(self.stream_id, time_point)
-
- if new_messages:
- self.last_message_read = new_messages[-1]["message_id"]
-
- logger.debug(f"[私聊][{self.private_name}]获取指定时间点111之前的消息: {new_messages}")
-
- return new_messages
-
- """主要观察循环"""
-
- async def _update_loop(self):
- """更新循环"""
- # try:
- # start_time = time.time()
- # messages = await self._fetch_new_messages_before(start_time)
- # for message in messages:
- # await self._add_message_to_history(message)
- # logger.debug(f"[私聊][{self.private_name}]缓冲消息: {messages}")
- # except Exception as e:
- # logger.error(f"[私聊][{self.private_name}]缓冲消息出错: {e}")
-
- while self._running:
- try:
- # 等待事件或超时(1秒)
- try:
- # print("等待事件")
- await asyncio.wait_for(self._update_event.wait(), timeout=1)
-
- except asyncio.TimeoutError:
- # print("超时")
- pass # 超时后也执行一次检查
-
- self._update_event.clear() # 重置触发事件
- self._update_complete.clear() # 重置完成事件
-
- # 获取新消息
- new_messages = await self._fetch_new_messages()
-
- if new_messages:
- # 处理新消息
- for message in new_messages:
- await self._add_message_to_history(message)
-
- # 设置完成事件
- self._update_complete.set()
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]更新循环出错: {e}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- self._update_complete.set() # 即使出错也要设置完成事件
-
- def trigger_update(self):
- """触发一次立即更新"""
- self._update_event.set()
-
- async def wait_for_update(self, timeout: float = 5.0) -> bool:
- """等待更新完成
-
- Args:
- timeout: 超时时间(秒)
-
- Returns:
- bool: 是否成功完成更新(False表示超时)
- """
- try:
- await asyncio.wait_for(self._update_complete.wait(), timeout=timeout)
- return True
- except asyncio.TimeoutError:
- logger.warning(f"[私聊][{self.private_name}]等待更新完成超时({timeout}秒)")
- return False
-
- def start(self):
- """启动观察器"""
- if self._running:
- return
-
- self._running = True
- self._task = asyncio.create_task(self._update_loop())
- logger.debug(f"[私聊][{self.private_name}]ChatObserver for {self.stream_id} started")
-
- def stop(self):
- """停止观察器"""
- self._running = False
- self._update_event.set() # 设置事件以解除等待
- self._update_complete.set() # 设置完成事件以解除等待
- if self._task:
- self._task.cancel()
- logger.debug(f"[私聊][{self.private_name}]ChatObserver for {self.stream_id} stopped")
-
- async def process_chat_history(self, messages: list):
- """处理聊天历史
-
- Args:
- messages: 消息列表
- """
- self.update_check_time()
-
- for msg in messages:
- try:
- user_info = UserInfo.from_dict(msg.get("user_info", {}))
- if user_info.user_id == global_config.bot.qq_account:
- self.update_bot_speak_time(msg["time"])
- else:
- self.update_user_speak_time(msg["time"])
- except Exception as e:
- logger.warning(f"[私聊][{self.private_name}]处理消息时间时出错: {e}")
- continue
-
- def update_check_time(self):
- """更新查看时间"""
- self.last_check_time = time.time()
-
- def update_bot_speak_time(self, speak_time: Optional[float] = None):
- """更新机器人说话时间"""
- self.last_bot_speak_time = speak_time or time.time()
-
- def update_user_speak_time(self, speak_time: Optional[float] = None):
- """更新用户说话时间"""
- self.last_user_speak_time = speak_time or time.time()
-
- def get_time_info(self) -> str:
- """获取时间信息文本"""
- current_time = time.time()
- time_info = ""
-
- if self.last_bot_speak_time:
- bot_speak_ago = current_time - self.last_bot_speak_time
- time_info += f"\n距离你上次发言已经过去了{int(bot_speak_ago)}秒"
-
- if self.last_user_speak_time:
- user_speak_ago = current_time - self.last_user_speak_time
- time_info += f"\n距离对方上次发言已经过去了{int(user_speak_ago)}秒"
-
- return time_info
-
- def get_cached_messages(self, limit: int = 50) -> List[Dict[str, Any]]:
- """获取缓存的消息历史
-
- Args:
- limit: 获取的最大消息数量,默认50
-
- Returns:
- List[Dict[str, Any]]: 缓存的消息历史列表
- """
- return self.message_cache[-limit:]
-
- def get_last_message(self) -> Optional[Dict[str, Any]]:
- """获取最后一条消息
-
- Returns:
- Optional[Dict[str, Any]]: 最后一条消息,如果没有则返回None
- """
- if not self.message_cache:
- return None
- return self.message_cache[-1]
-
- def __str__(self):
- return f"ChatObserver for {self.stream_id}"
diff --git a/src/experimental/PFC/chat_states.py b/src/experimental/PFC/chat_states.py
deleted file mode 100644
index 4b839b7b..00000000
--- a/src/experimental/PFC/chat_states.py
+++ /dev/null
@@ -1,290 +0,0 @@
-from enum import Enum, auto
-from typing import Optional, Dict, Any, List, Set
-from dataclasses import dataclass
-from datetime import datetime
-from abc import ABC, abstractmethod
-
-
-class ChatState(Enum):
- """聊天状态枚举"""
-
- NORMAL = auto() # 正常状态
- NEW_MESSAGE = auto() # 有新消息
- COLD_CHAT = auto() # 冷场状态
- ACTIVE_CHAT = auto() # 活跃状态
- BOT_SPEAKING = auto() # 机器人正在说话
- USER_SPEAKING = auto() # 用户正在说话
- SILENT = auto() # 沉默状态
- ERROR = auto() # 错误状态
-
-
-class NotificationType(Enum):
- """通知类型枚举"""
-
- NEW_MESSAGE = auto() # 新消息通知
- COLD_CHAT = auto() # 冷场通知
- ACTIVE_CHAT = auto() # 活跃通知
- BOT_SPEAKING = auto() # 机器人说话通知
- USER_SPEAKING = auto() # 用户说话通知
- MESSAGE_DELETED = auto() # 消息删除通知
- USER_JOINED = auto() # 用户加入通知
- USER_LEFT = auto() # 用户离开通知
- ERROR = auto() # 错误通知
-
-
-@dataclass
-class ChatStateInfo:
- """聊天状态信息"""
-
- state: ChatState
- last_message_time: Optional[float] = None
- last_message_content: Optional[str] = None
- last_speaker: Optional[str] = None
- message_count: int = 0
- cold_duration: float = 0.0 # 冷场持续时间(秒)
- active_duration: float = 0.0 # 活跃持续时间(秒)
-
-
-@dataclass
-class Notification:
- """通知基类"""
-
- type: NotificationType
- timestamp: float
- sender: str # 发送者标识
- target: str # 接收者标识
- data: Dict[str, Any]
-
- def to_dict(self) -> Dict[str, Any]:
- """转换为字典格式"""
- return {"type": self.type.name, "timestamp": self.timestamp, "data": self.data}
-
-
-@dataclass
-class StateNotification(Notification):
- """持续状态通知"""
-
- is_active: bool = True
-
- def to_dict(self) -> Dict[str, Any]:
- base_dict = super().to_dict()
- base_dict["is_active"] = self.is_active
- return base_dict
-
-
-class NotificationHandler(ABC):
- """通知处理器接口"""
-
- @abstractmethod
- async def handle_notification(self, notification: Notification):
- """处理通知"""
- pass
-
-
-class NotificationManager:
- """通知管理器"""
-
- def __init__(self):
- # 按接收者和通知类型存储处理器
- self._handlers: Dict[str, Dict[NotificationType, List[NotificationHandler]]] = {}
- self._active_states: Set[NotificationType] = set()
- self._notification_history: List[Notification] = []
-
- def register_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
- """注册通知处理器
-
- Args:
- target: 接收者标识(例如:"pfc")
- notification_type: 要处理的通知类型
- handler: 处理器实例
- """
- if target not in self._handlers:
- self._handlers[target] = {}
- if notification_type not in self._handlers[target]:
- self._handlers[target][notification_type] = []
- # print(self._handlers[target][notification_type])
- self._handlers[target][notification_type].append(handler)
- # print(self._handlers[target][notification_type])
-
- def unregister_handler(self, target: str, notification_type: NotificationType, handler: NotificationHandler):
- """注销通知处理器
-
- Args:
- target: 接收者标识
- notification_type: 通知类型
- handler: 要注销的处理器实例
- """
- if target in self._handlers and notification_type in self._handlers[target]:
- handlers = self._handlers[target][notification_type]
- if handler in handlers:
- handlers.remove(handler)
- # 如果该类型的处理器列表为空,删除该类型
- if not handlers:
- del self._handlers[target][notification_type]
- # 如果该目标没有任何处理器,删除该目标
- if not self._handlers[target]:
- del self._handlers[target]
-
- async def send_notification(self, notification: Notification):
- """发送通知"""
- self._notification_history.append(notification)
-
- # 如果是状态通知,更新活跃状态
- if isinstance(notification, StateNotification):
- if notification.is_active:
- self._active_states.add(notification.type)
- else:
- self._active_states.discard(notification.type)
-
- # 调用目标接收者的处理器
- target = notification.target
- if target in self._handlers:
- handlers = self._handlers[target].get(notification.type, [])
- # print(handlers)
- for handler in handlers:
- # print(f"调用处理器: {handler}")
- await handler.handle_notification(notification)
-
- def get_active_states(self) -> Set[NotificationType]:
- """获取当前活跃的状态"""
- return self._active_states.copy()
-
- def is_state_active(self, state_type: NotificationType) -> bool:
- """检查特定状态是否活跃"""
- return state_type in self._active_states
-
- def get_notification_history(
- self, sender: Optional[str] = None, target: Optional[str] = None, limit: Optional[int] = None
- ) -> List[Notification]:
- """获取通知历史
-
- Args:
- sender: 过滤特定发送者的通知
- target: 过滤特定接收者的通知
- limit: 限制返回数量
- """
- history = self._notification_history
-
- if sender:
- history = [n for n in history if n.sender == sender]
- if target:
- history = [n for n in history if n.target == target]
-
- if limit is not None:
- history = history[-limit:]
-
- return history
-
- def __str__(self):
- str = ""
- for target, handlers in self._handlers.items():
- for notification_type, handler_list in handlers.items():
- str += f"NotificationManager for {target} {notification_type} {handler_list}"
- return str
-
-
-# 一些常用的通知创建函数
-def create_new_message_notification(sender: str, target: str, message: Dict[str, Any]) -> Notification:
- """创建新消息通知"""
- return Notification(
- type=NotificationType.NEW_MESSAGE,
- timestamp=datetime.now().timestamp(),
- sender=sender,
- target=target,
- data={
- "message_id": message.get("message_id"),
- "processed_plain_text": message.get("processed_plain_text"),
- "detailed_plain_text": message.get("detailed_plain_text"),
- "user_info": message.get("user_info"),
- "time": message.get("time"),
- },
- )
-
-
-def create_cold_chat_notification(sender: str, target: str, is_cold: bool) -> StateNotification:
- """创建冷场状态通知"""
- return StateNotification(
- type=NotificationType.COLD_CHAT,
- timestamp=datetime.now().timestamp(),
- sender=sender,
- target=target,
- data={"is_cold": is_cold},
- is_active=is_cold,
- )
-
-
-def create_active_chat_notification(sender: str, target: str, is_active: bool) -> StateNotification:
- """创建活跃状态通知"""
- return StateNotification(
- type=NotificationType.ACTIVE_CHAT,
- timestamp=datetime.now().timestamp(),
- sender=sender,
- target=target,
- data={"is_active": is_active},
- is_active=is_active,
- )
-
-
-class ChatStateManager:
- """聊天状态管理器"""
-
- def __init__(self):
- self.current_state = ChatState.NORMAL
- self.state_info = ChatStateInfo(state=ChatState.NORMAL)
- self.state_history: list[ChatStateInfo] = []
-
- def update_state(self, new_state: ChatState, **kwargs):
- """更新聊天状态
-
- Args:
- new_state: 新的状态
- **kwargs: 其他状态信息
- """
- self.current_state = new_state
- self.state_info.state = new_state
-
- # 更新其他状态信息
- for key, value in kwargs.items():
- if hasattr(self.state_info, key):
- setattr(self.state_info, key, value)
-
- # 记录状态历史
- self.state_history.append(self.state_info)
-
- def get_current_state_info(self) -> ChatStateInfo:
- """获取当前状态信息"""
- return self.state_info
-
- def get_state_history(self) -> list[ChatStateInfo]:
- """获取状态历史"""
- return self.state_history
-
- def is_cold_chat(self, threshold: float = 60.0) -> bool:
- """判断是否处于冷场状态
-
- Args:
- threshold: 冷场阈值(秒)
-
- Returns:
- bool: 是否冷场
- """
- if not self.state_info.last_message_time:
- return True
-
- current_time = datetime.now().timestamp()
- return (current_time - self.state_info.last_message_time) > threshold
-
- def is_active_chat(self, threshold: float = 5.0) -> bool:
- """判断是否处于活跃状态
-
- Args:
- threshold: 活跃阈值(秒)
-
- Returns:
- bool: 是否活跃
- """
- if not self.state_info.last_message_time:
- return False
-
- current_time = datetime.now().timestamp()
- return (current_time - self.state_info.last_message_time) <= threshold
diff --git a/src/experimental/PFC/conversation.py b/src/experimental/PFC/conversation.py
deleted file mode 100644
index 9be05517..00000000
--- a/src/experimental/PFC/conversation.py
+++ /dev/null
@@ -1,701 +0,0 @@
-import time
-import asyncio
-import datetime
-
-# from .message_storage import MongoDBMessageStorage
-from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_before_timestamp_with_chat
-
-# from ...config.config import global_config
-from typing import Dict, Any, Optional
-from src.chat.message_receive.message import Message
-from .pfc_types import ConversationState
-from .pfc import ChatObserver, GoalAnalyzer
-from .message_sender import DirectMessageSender
-from src.common.logger import get_logger
-from .action_planner import ActionPlanner
-from .observation_info import ObservationInfo
-from .conversation_info import ConversationInfo # 确保导入 ConversationInfo
-from .reply_generator import ReplyGenerator
-from src.chat.message_receive.chat_stream import ChatStream
-from src.chat.message_receive.message import UserInfo
-from src.chat.message_receive.chat_stream import get_chat_manager
-from .pfc_KnowledgeFetcher import KnowledgeFetcher
-from .waiter import Waiter
-
-import traceback
-from rich.traceback import install
-
-install(extra_lines=3)
-
-logger = get_logger("pfc")
-
-
-class Conversation:
- """对话类,负责管理单个对话的状态和行为"""
-
- def __init__(self, stream_id: str, private_name: str):
- """初始化对话实例
-
- Args:
- stream_id: 聊天流ID
- """
- self.stream_id = stream_id
- self.private_name = private_name
- self.state = ConversationState.INIT
- self.should_continue = False
- self.ignore_until_timestamp: Optional[float] = None
-
- # 回复相关
- self.generated_reply = ""
-
- async def _initialize(self):
- """初始化实例,注册所有组件"""
-
- try:
- self.action_planner = ActionPlanner(self.stream_id, self.private_name)
- self.goal_analyzer = GoalAnalyzer(self.stream_id, self.private_name)
- self.reply_generator = ReplyGenerator(self.stream_id, self.private_name)
- self.knowledge_fetcher = KnowledgeFetcher(self.private_name)
- self.waiter = Waiter(self.stream_id, self.private_name)
- self.direct_sender = DirectMessageSender(self.private_name)
-
- # 获取聊天流信息
- self.chat_stream = get_chat_manager().get_stream(self.stream_id)
-
- self.stop_action_planner = False
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]初始化对话实例:注册运行组件失败: {e}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- raise
-
- try:
- # 决策所需要的信息,包括自身自信和观察信息两部分
- # 注册观察器和观测信息
- self.chat_observer = ChatObserver.get_instance(self.stream_id, self.private_name)
- self.chat_observer.start()
- self.observation_info = ObservationInfo(self.private_name)
- self.observation_info.bind_to_chat_observer(self.chat_observer)
- # print(self.chat_observer.get_cached_messages(limit=)
-
- self.conversation_info = ConversationInfo()
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]初始化对话实例:注册信息组件失败: {e}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- raise
- try:
- logger.info(f"[私聊][{self.private_name}]为 {self.stream_id} 加载初始聊天记录...")
- initial_messages = get_raw_msg_before_timestamp_with_chat( #
- chat_id=self.stream_id,
- timestamp=time.time(),
- limit=30, # 加载最近30条作为初始上下文,可以调整
- )
- chat_talking_prompt = build_readable_messages(
- initial_messages,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
- if initial_messages:
- # 将加载的消息填充到 ObservationInfo 的 chat_history
- self.observation_info.chat_history = initial_messages
- self.observation_info.chat_history_str = chat_talking_prompt + "\n"
- self.observation_info.chat_history_count = len(initial_messages)
-
- # 更新 ObservationInfo 中的时间戳等信息
- last_msg = initial_messages[-1]
- self.observation_info.last_message_time = last_msg.get("time")
- last_user_info = UserInfo.from_dict(last_msg.get("user_info", {}))
- self.observation_info.last_message_sender = last_user_info.user_id
- self.observation_info.last_message_content = last_msg.get("processed_plain_text", "")
-
- logger.info(
- f"[私聊][{self.private_name}]成功加载 {len(initial_messages)} 条初始聊天记录。最后一条消息时间: {self.observation_info.last_message_time}"
- )
-
- # 让 ChatObserver 从加载的最后一条消息之后开始同步
- self.chat_observer.last_message_time = self.observation_info.last_message_time
- self.chat_observer.last_message_read = last_msg # 更新 observer 的最后读取记录
- else:
- logger.info(f"[私聊][{self.private_name}]没有找到初始聊天记录。")
-
- except Exception as load_err:
- logger.error(f"[私聊][{self.private_name}]加载初始聊天记录时出错: {load_err}")
- # 出错也要继续,只是没有历史记录而已
- # 组件准备完成,启动该论对话
- self.should_continue = True
- asyncio.create_task(self.start())
-
- async def start(self):
- """开始对话流程"""
- try:
- logger.info(f"[私聊][{self.private_name}]对话系统启动中...")
- asyncio.create_task(self._plan_and_action_loop())
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]启动对话系统失败: {e}")
- raise
-
- async def _plan_and_action_loop(self):
- """思考步,PFC核心循环模块"""
- while self.should_continue:
- # 忽略逻辑
- if self.ignore_until_timestamp and time.time() < self.ignore_until_timestamp:
- await asyncio.sleep(30)
- continue
- elif self.ignore_until_timestamp and time.time() >= self.ignore_until_timestamp:
- logger.info(f"[私聊][{self.private_name}]忽略时间已到 {self.stream_id},准备结束对话。")
- self.ignore_until_timestamp = None
- self.should_continue = False
- continue
- try:
- # --- 在规划前记录当前新消息数量 ---
- initial_new_message_count = 0
- if hasattr(self.observation_info, "new_messages_count"):
- initial_new_message_count = self.observation_info.new_messages_count + 1 # 算上麦麦自己发的那一条
- else:
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo missing 'new_messages_count' before planning."
- )
-
- # --- 调用 Action Planner ---
- # 传递 self.conversation_info.last_successful_reply_action
- action, reason = await self.action_planner.plan(
- self.observation_info, self.conversation_info, self.conversation_info.last_successful_reply_action
- )
-
- # --- 规划后检查是否有 *更多* 新消息到达 ---
- current_new_message_count = 0
- if hasattr(self.observation_info, "new_messages_count"):
- current_new_message_count = self.observation_info.new_messages_count
- else:
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo missing 'new_messages_count' after planning."
- )
-
- if current_new_message_count > initial_new_message_count + 2:
- logger.info(
- f"[私聊][{self.private_name}]规划期间发现新增消息 ({initial_new_message_count} -> {current_new_message_count}),跳过本次行动,重新规划"
- )
- # 如果规划期间有新消息,也应该重置上次回复状态,因为现在要响应新消息了
- self.conversation_info.last_successful_reply_action = None
- await asyncio.sleep(0.1)
- continue
-
- # 包含 send_new_message
- if initial_new_message_count > 0 and action in ["direct_reply", "send_new_message"]:
- if hasattr(self.observation_info, "clear_unprocessed_messages"):
- logger.debug(
- f"[私聊][{self.private_name}]准备执行 {action},清理 {initial_new_message_count} 条规划时已知的新消息。"
- )
- await self.observation_info.clear_unprocessed_messages()
- if hasattr(self.observation_info, "new_messages_count"):
- self.observation_info.new_messages_count = 0
- else:
- logger.error(
- f"[私聊][{self.private_name}]无法清理未处理消息: ObservationInfo 缺少 clear_unprocessed_messages 方法!"
- )
-
- await self._handle_action(action, reason, self.observation_info, self.conversation_info)
-
- # 检查是否需要结束对话 (逻辑不变)
- goal_ended = False
- if hasattr(self.conversation_info, "goal_list") and self.conversation_info.goal_list:
- for goal_item in self.conversation_info.goal_list:
- if isinstance(goal_item, dict):
- current_goal = goal_item.get("goal")
-
- if current_goal == "结束对话":
- goal_ended = True
- break
-
- if goal_ended:
- self.should_continue = False
- logger.info(f"[私聊][{self.private_name}]检测到'结束对话'目标,停止循环。")
-
- except Exception as loop_err:
- logger.error(f"[私聊][{self.private_name}]PFC主循环出错: {loop_err}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- await asyncio.sleep(1)
-
- if self.should_continue:
- await asyncio.sleep(0.1)
-
- logger.info(f"[私聊][{self.private_name}]PFC 循环结束 for stream_id: {self.stream_id}")
-
- def _check_new_messages_after_planning(self):
- """检查在规划后是否有新消息"""
- # 检查 ObservationInfo 是否已初始化并且有 new_messages_count 属性
- if not hasattr(self, "observation_info") or not hasattr(self.observation_info, "new_messages_count"):
- logger.warning(
- f"[私聊][{self.private_name}]ObservationInfo 未初始化或缺少 'new_messages_count' 属性,无法检查新消息。"
- )
- return False # 或者根据需要抛出错误
-
- if self.observation_info.new_messages_count > 2:
- logger.info(
- f"[私聊][{self.private_name}]生成/执行动作期间收到 {self.observation_info.new_messages_count} 条新消息,取消当前动作并重新规划"
- )
- # 如果有新消息,也应该重置上次回复状态
- if hasattr(self, "conversation_info"): # 确保 conversation_info 已初始化
- self.conversation_info.last_successful_reply_action = None
- else:
- logger.warning(
- f"[私聊][{self.private_name}]ConversationInfo 未初始化,无法重置 last_successful_reply_action。"
- )
- return True
- return False
-
- def _convert_to_message(self, msg_dict: Dict[str, Any]) -> Message:
- """将消息字典转换为Message对象"""
- try:
- # 尝试从 msg_dict 直接获取 chat_stream,如果失败则从全局 get_chat_manager 获取
- chat_info = msg_dict.get("chat_info")
- if chat_info and isinstance(chat_info, dict):
- chat_stream = ChatStream.from_dict(chat_info)
- elif self.chat_stream: # 使用实例变量中的 chat_stream
- chat_stream = self.chat_stream
- else: # Fallback: 尝试从 manager 获取 (可能需要 stream_id)
- chat_stream = get_chat_manager().get_stream(self.stream_id)
- if not chat_stream:
- raise ValueError(f"无法确定 ChatStream for stream_id {self.stream_id}")
-
- user_info = UserInfo.from_dict(msg_dict.get("user_info", {}))
-
- return Message(
- message_id=msg_dict.get("message_id", f"gen_{time.time()}"), # 提供默认 ID
- chat_stream=chat_stream, # 使用确定的 chat_stream
- time=msg_dict.get("time", time.time()), # 提供默认时间
- user_info=user_info,
- processed_plain_text=msg_dict.get("processed_plain_text", ""),
- detailed_plain_text=msg_dict.get("detailed_plain_text", ""),
- )
- except Exception as e:
- logger.warning(f"[私聊][{self.private_name}]转换消息时出错: {e}")
- # 可以选择返回 None 或重新抛出异常,这里选择重新抛出以指示问题
- raise ValueError(f"无法将字典转换为 Message 对象: {e}") from e
-
- async def _handle_action(
- self, action: str, reason: str, observation_info: ObservationInfo, conversation_info: ConversationInfo
- ):
- """处理规划的行动"""
-
- logger.debug(f"[私聊][{self.private_name}]执行行动: {action}, 原因: {reason}")
-
- # 记录action历史 (逻辑不变)
- current_action_record = {
- "action": action,
- "plan_reason": reason,
- "status": "start",
- "time": datetime.datetime.now().strftime("%H:%M:%S"),
- "final_reason": None,
- }
- # 确保 done_action 列表存在
- if not hasattr(conversation_info, "done_action"):
- conversation_info.done_action = []
- conversation_info.done_action.append(current_action_record)
- action_index = len(conversation_info.done_action) - 1
-
- action_successful = False # 用于标记动作是否成功完成
-
- # --- 根据不同的 action 执行 ---
-
- # send_new_message 失败后执行 wait
- if action == "send_new_message":
- max_reply_attempts = 3
- reply_attempt_count = 0
- is_suitable = False
- need_replan = False
- check_reason = "未进行尝试"
- final_reply_to_send = ""
-
- while reply_attempt_count < max_reply_attempts and not is_suitable:
- reply_attempt_count += 1
- logger.info(
- f"[私聊][{self.private_name}]尝试生成追问回复 (第 {reply_attempt_count}/{max_reply_attempts} 次)..."
- )
- self.state = ConversationState.GENERATING
-
- # 1. 生成回复 (调用 generate 时传入 action_type)
- self.generated_reply = await self.reply_generator.generate(
- observation_info, conversation_info, action_type="send_new_message"
- )
- logger.info(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次生成的追问回复: {self.generated_reply}"
- )
-
- # 2. 检查回复 (逻辑不变)
- self.state = ConversationState.CHECKING
- try:
- current_goal_str = conversation_info.goal_list[0]["goal"] if conversation_info.goal_list else ""
- is_suitable, check_reason, need_replan = await self.reply_generator.check_reply(
- reply=self.generated_reply,
- goal=current_goal_str,
- chat_history=observation_info.chat_history,
- chat_history_str=observation_info.chat_history_str,
- retry_count=reply_attempt_count - 1,
- )
- logger.info(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次追问检查结果: 合适={is_suitable}, 原因='{check_reason}', 需重新规划={need_replan}"
- )
- if is_suitable:
- final_reply_to_send = self.generated_reply
- break
- elif need_replan:
- logger.warning(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次追问检查建议重新规划,停止尝试。原因: {check_reason}"
- )
- break
- except Exception as check_err:
- logger.error(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次调用 ReplyChecker (追问) 时出错: {check_err}"
- )
- check_reason = f"第 {reply_attempt_count} 次检查过程出错: {check_err}"
- break
-
- # 循环结束,处理最终结果
- if is_suitable:
- # 检查是否有新消息
- if self._check_new_messages_after_planning():
- logger.info(f"[私聊][{self.private_name}]生成追问回复期间收到新消息,取消发送,重新规划行动")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"有新消息,取消发送追问: {final_reply_to_send}"}
- )
- return # 直接返回,重新规划
-
- # 发送合适的回复
- self.generated_reply = final_reply_to_send
- # --- 在这里调用 _send_reply ---
- await self._send_reply() # <--- 调用恢复后的函数
-
- # 更新状态: 标记上次成功是 send_new_message
- self.conversation_info.last_successful_reply_action = "send_new_message"
- action_successful = True # 标记动作成功
-
- elif need_replan:
- # 打回动作决策
- logger.warning(
- f"[私聊][{self.private_name}]经过 {reply_attempt_count} 次尝试,追问回复决定打回动作决策。打回原因: {check_reason}"
- )
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"追问尝试{reply_attempt_count}次后打回: {check_reason}"}
- )
-
- else:
- # 追问失败
- logger.warning(
- f"[私聊][{self.private_name}]经过 {reply_attempt_count} 次尝试,未能生成合适的追问回复。最终原因: {check_reason}"
- )
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"追问尝试{reply_attempt_count}次后失败: {check_reason}"}
- )
- # 重置状态: 追问失败,下次用初始 prompt
- self.conversation_info.last_successful_reply_action = None
-
- # 执行 Wait 操作
- logger.info(f"[私聊][{self.private_name}]由于无法生成合适追问回复,执行 'wait' 操作...")
- self.state = ConversationState.WAITING
- await self.waiter.wait(self.conversation_info)
- wait_action_record = {
- "action": "wait",
- "plan_reason": "因 send_new_message 多次尝试失败而执行的后备等待",
- "status": "done",
- "time": datetime.datetime.now().strftime("%H:%M:%S"),
- "final_reason": None,
- }
- conversation_info.done_action.append(wait_action_record)
-
- elif action == "direct_reply":
- max_reply_attempts = 3
- reply_attempt_count = 0
- is_suitable = False
- need_replan = False
- check_reason = "未进行尝试"
- final_reply_to_send = ""
-
- while reply_attempt_count < max_reply_attempts and not is_suitable:
- reply_attempt_count += 1
- logger.info(
- f"[私聊][{self.private_name}]尝试生成首次回复 (第 {reply_attempt_count}/{max_reply_attempts} 次)..."
- )
- self.state = ConversationState.GENERATING
-
- # 1. 生成回复
- self.generated_reply = await self.reply_generator.generate(
- observation_info, conversation_info, action_type="direct_reply"
- )
- logger.info(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次生成的首次回复: {self.generated_reply}"
- )
-
- # 2. 检查回复
- self.state = ConversationState.CHECKING
- try:
- current_goal_str = conversation_info.goal_list[0]["goal"] if conversation_info.goal_list else ""
- is_suitable, check_reason, need_replan = await self.reply_generator.check_reply(
- reply=self.generated_reply,
- goal=current_goal_str,
- chat_history=observation_info.chat_history,
- chat_history_str=observation_info.chat_history_str,
- retry_count=reply_attempt_count - 1,
- )
- logger.info(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次首次回复检查结果: 合适={is_suitable}, 原因='{check_reason}', 需重新规划={need_replan}"
- )
- if is_suitable:
- final_reply_to_send = self.generated_reply
- break
- elif need_replan:
- logger.warning(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次首次回复检查建议重新规划,停止尝试。原因: {check_reason}"
- )
- break
- except Exception as check_err:
- logger.error(
- f"[私聊][{self.private_name}]第 {reply_attempt_count} 次调用 ReplyChecker (首次回复) 时出错: {check_err}"
- )
- check_reason = f"第 {reply_attempt_count} 次检查过程出错: {check_err}"
- break
-
- # 循环结束,处理最终结果
- if is_suitable:
- # 检查是否有新消息
- if self._check_new_messages_after_planning():
- logger.info(f"[私聊][{self.private_name}]生成首次回复期间收到新消息,取消发送,重新规划行动")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"有新消息,取消发送首次回复: {final_reply_to_send}"}
- )
- return # 直接返回,重新规划
-
- # 发送合适的回复
- self.generated_reply = final_reply_to_send
- # --- 在这里调用 _send_reply ---
- await self._send_reply() # <--- 调用恢复后的函数
-
- # 更新状态: 标记上次成功是 direct_reply
- self.conversation_info.last_successful_reply_action = "direct_reply"
- action_successful = True # 标记动作成功
-
- elif need_replan:
- # 打回动作决策
- logger.warning(
- f"[私聊][{self.private_name}]经过 {reply_attempt_count} 次尝试,首次回复决定打回动作决策。打回原因: {check_reason}"
- )
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"首次回复尝试{reply_attempt_count}次后打回: {check_reason}"}
- )
-
- else:
- # 首次回复失败
- logger.warning(
- f"[私聊][{self.private_name}]经过 {reply_attempt_count} 次尝试,未能生成合适的首次回复。最终原因: {check_reason}"
- )
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"首次回复尝试{reply_attempt_count}次后失败: {check_reason}"}
- )
- # 重置状态: 首次回复失败,下次还是用初始 prompt
- self.conversation_info.last_successful_reply_action = None
-
- # 执行 Wait 操作 (保持原有逻辑)
- logger.info(f"[私聊][{self.private_name}]由于无法生成合适首次回复,执行 'wait' 操作...")
- self.state = ConversationState.WAITING
- await self.waiter.wait(self.conversation_info)
- wait_action_record = {
- "action": "wait",
- "plan_reason": "因 direct_reply 多次尝试失败而执行的后备等待",
- "status": "done",
- "time": datetime.datetime.now().strftime("%H:%M:%S"),
- "final_reason": None,
- }
- conversation_info.done_action.append(wait_action_record)
-
- elif action == "fetch_knowledge":
- self.state = ConversationState.FETCHING
- knowledge_query = reason
- try:
- # 检查 knowledge_fetcher 是否存在
- if not hasattr(self, "knowledge_fetcher"):
- logger.error(f"[私聊][{self.private_name}]KnowledgeFetcher 未初始化,无法获取知识。")
- raise AttributeError("KnowledgeFetcher not initialized")
-
- knowledge, source = await self.knowledge_fetcher.fetch(knowledge_query, observation_info.chat_history)
- logger.info(f"[私聊][{self.private_name}]获取到知识: {knowledge[:100]}..., 来源: {source}")
- if knowledge:
- # 确保 knowledge_list 存在
- if not hasattr(conversation_info, "knowledge_list"):
- conversation_info.knowledge_list = []
- conversation_info.knowledge_list.append(
- {"query": knowledge_query, "knowledge": knowledge, "source": source}
- )
- action_successful = True
- except Exception as fetch_err:
- logger.error(f"[私聊][{self.private_name}]获取知识时出错: {str(fetch_err)}")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"获取知识失败: {str(fetch_err)}"}
- )
- self.conversation_info.last_successful_reply_action = None # 重置状态
-
- elif action == "rethink_goal":
- self.state = ConversationState.RETHINKING
- try:
- # 检查 goal_analyzer 是否存在
- if not hasattr(self, "goal_analyzer"):
- logger.error(f"[私聊][{self.private_name}]GoalAnalyzer 未初始化,无法重新思考目标。")
- raise AttributeError("GoalAnalyzer not initialized")
- await self.goal_analyzer.analyze_goal(conversation_info, observation_info)
- action_successful = True
- except Exception as rethink_err:
- logger.error(f"[私聊][{self.private_name}]重新思考目标时出错: {rethink_err}")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"重新思考目标失败: {rethink_err}"}
- )
- self.conversation_info.last_successful_reply_action = None # 重置状态
-
- elif action == "listening":
- self.state = ConversationState.LISTENING
- logger.info(f"[私聊][{self.private_name}]倾听对方发言...")
- try:
- # 检查 waiter 是否存在
- if not hasattr(self, "waiter"):
- logger.error(f"[私聊][{self.private_name}]Waiter 未初始化,无法倾听。")
- raise AttributeError("Waiter not initialized")
- await self.waiter.wait_listening(conversation_info)
- action_successful = True # Listening 完成就算成功
- except Exception as listen_err:
- logger.error(f"[私聊][{self.private_name}]倾听时出错: {listen_err}")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"倾听失败: {listen_err}"}
- )
- self.conversation_info.last_successful_reply_action = None # 重置状态
-
- elif action == "say_goodbye":
- self.state = ConversationState.GENERATING # 也可以定义一个新的状态,如 ENDING
- logger.info(f"[私聊][{self.private_name}]执行行动: 生成并发送告别语...")
- try:
- # 1. 生成告别语 (使用 'say_goodbye' action_type)
- self.generated_reply = await self.reply_generator.generate(
- observation_info, conversation_info, action_type="say_goodbye"
- )
- logger.info(f"[私聊][{self.private_name}]生成的告别语: {self.generated_reply}")
-
- # 2. 直接发送告别语 (不经过检查)
- if self.generated_reply: # 确保生成了内容
- await self._send_reply() # 调用发送方法
- # 发送成功后,标记动作成功
- action_successful = True
- logger.info(f"[私聊][{self.private_name}]告别语已发送。")
- else:
- logger.warning(f"[私聊][{self.private_name}]未能生成告别语内容,无法发送。")
- action_successful = False # 标记动作失败
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": "未能生成告别语内容"}
- )
-
- # 3. 无论是否发送成功,都准备结束对话
- self.should_continue = False
- logger.info(f"[私聊][{self.private_name}]发送告别语流程结束,即将停止对话实例。")
-
- except Exception as goodbye_err:
- logger.error(f"[私聊][{self.private_name}]生成或发送告别语时出错: {goodbye_err}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- # 即使出错,也结束对话
- self.should_continue = False
- action_successful = False # 标记动作失败
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"生成或发送告别语时出错: {goodbye_err}"}
- )
-
- elif action == "end_conversation":
- # 这个分支现在只会在 action_planner 最终决定不告别时被调用
- self.should_continue = False
- logger.info(f"[私聊][{self.private_name}]收到最终结束指令,停止对话...")
- action_successful = True # 标记这个指令本身是成功的
-
- elif action == "block_and_ignore":
- logger.info(f"[私聊][{self.private_name}]不想再理你了...")
- ignore_duration_seconds = 10 * 60
- self.ignore_until_timestamp = time.time() + ignore_duration_seconds
- logger.info(
- f"[私聊][{self.private_name}]将忽略此对话直到: {datetime.datetime.fromtimestamp(self.ignore_until_timestamp)}"
- )
- self.state = ConversationState.IGNORED
- action_successful = True # 标记动作成功
-
- else: # 对应 'wait' 动作
- self.state = ConversationState.WAITING
- logger.info(f"[私聊][{self.private_name}]等待更多信息...")
- try:
- # 检查 waiter 是否存在
- if not hasattr(self, "waiter"):
- logger.error(f"[私聊][{self.private_name}]Waiter 未初始化,无法等待。")
- raise AttributeError("Waiter not initialized")
- _timeout_occurred = await self.waiter.wait(self.conversation_info)
- action_successful = True # Wait 完成就算成功
- except Exception as wait_err:
- logger.error(f"[私聊][{self.private_name}]等待时出错: {wait_err}")
- conversation_info.done_action[action_index].update(
- {"status": "recall", "final_reason": f"等待失败: {wait_err}"}
- )
- self.conversation_info.last_successful_reply_action = None # 重置状态
-
- # --- 更新 Action History 状态 ---
- # 只有当动作本身成功时,才更新状态为 done
- if action_successful:
- conversation_info.done_action[action_index].update(
- {
- "status": "done",
- "time": datetime.datetime.now().strftime("%H:%M:%S"),
- }
- )
- # 重置状态: 对于非回复类动作的成功,清除上次回复状态
- if action not in ["direct_reply", "send_new_message"]:
- self.conversation_info.last_successful_reply_action = None
- logger.debug(f"[私聊][{self.private_name}]动作 {action} 成功完成,重置 last_successful_reply_action")
- # 如果动作是 recall 状态,在各自的处理逻辑中已经更新了 done_action
-
- async def _send_reply(self):
- """发送回复"""
- if not self.generated_reply:
- logger.warning(f"[私聊][{self.private_name}]没有生成回复内容,无法发送。")
- return
-
- try:
- _current_time = time.time()
- reply_content = self.generated_reply
-
- # 发送消息 (确保 direct_sender 和 chat_stream 有效)
- if not hasattr(self, "direct_sender") or not self.direct_sender:
- logger.error(f"[私聊][{self.private_name}]DirectMessageSender 未初始化,无法发送回复。")
- return
- if not self.chat_stream:
- logger.error(f"[私聊][{self.private_name}]ChatStream 未初始化,无法发送回复。")
- return
-
- await self.direct_sender.send_message(chat_stream=self.chat_stream, content=reply_content)
-
- # 发送成功后,手动触发 observer 更新可能导致重复处理自己发送的消息
- # 更好的做法是依赖 observer 的自动轮询或数据库触发器(如果支持)
- # 暂时注释掉,观察是否影响 ObservationInfo 的更新
- # self.chat_observer.trigger_update()
- # if not await self.chat_observer.wait_for_update():
- # logger.warning(f"[私聊][{self.private_name}]等待 ChatObserver 更新完成超时")
-
- self.state = ConversationState.ANALYZING # 更新状态
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]发送消息或更新状态时失败: {str(e)}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}")
- self.state = ConversationState.ANALYZING
-
- async def _send_timeout_message(self):
- """发送超时结束消息"""
- try:
- messages = self.chat_observer.get_cached_messages(limit=1)
- if not messages:
- return
-
- latest_message = self._convert_to_message(messages[0])
- await self.direct_sender.send_message(
- chat_stream=self.chat_stream, content="TODO:超时消息", reply_to_message=latest_message
- )
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]发送超时消息失败: {str(e)}")
diff --git a/src/experimental/PFC/conversation_info.py b/src/experimental/PFC/conversation_info.py
deleted file mode 100644
index 04524b69..00000000
--- a/src/experimental/PFC/conversation_info.py
+++ /dev/null
@@ -1,10 +0,0 @@
-from typing import Optional
-
-
-class ConversationInfo:
- def __init__(self):
- self.done_action = []
- self.goal_list = []
- self.knowledge_list = []
- self.memory_list = []
- self.last_successful_reply_action: Optional[str] = None
diff --git a/src/experimental/PFC/message_sender.py b/src/experimental/PFC/message_sender.py
deleted file mode 100644
index 841ebe45..00000000
--- a/src/experimental/PFC/message_sender.py
+++ /dev/null
@@ -1,81 +0,0 @@
-import time
-from typing import Optional
-from src.common.logger import get_logger
-from src.chat.message_receive.chat_stream import ChatStream
-from src.chat.message_receive.message import Message
-from maim_message import UserInfo, Seg
-from src.chat.message_receive.message import MessageSending, MessageSet
-from src.chat.message_receive.message_sender import message_manager
-from src.chat.message_receive.storage import MessageStorage
-from src.config.config import global_config
-from rich.traceback import install
-
-install(extra_lines=3)
-
-
-logger = get_logger("message_sender")
-
-
-class DirectMessageSender:
- """直接消息发送器"""
-
- def __init__(self, private_name: str):
- self.private_name = private_name
- self.storage = MessageStorage()
-
- async def send_message(
- self,
- chat_stream: ChatStream,
- content: str,
- reply_to_message: Optional[Message] = None,
- ) -> None:
- """发送消息到聊天流
-
- Args:
- chat_stream: 聊天流
- content: 消息内容
- reply_to_message: 要回复的消息(可选)
- """
- try:
- # 创建消息内容
- segments = Seg(type="seglist", data=[Seg(type="text", data=content)])
-
- # 获取麦麦的信息
- bot_user_info = UserInfo(
- user_id=global_config.bot.qq_account,
- user_nickname=global_config.bot.nickname,
- platform=chat_stream.platform,
- )
-
- # 用当前时间作为message_id,和之前那套sender一样
- message_id = f"dm{round(time.time(), 2)}"
-
- # 构建消息对象
- message = MessageSending(
- message_id=message_id,
- chat_stream=chat_stream,
- bot_user_info=bot_user_info,
- sender_info=reply_to_message.message_info.user_info if reply_to_message else None,
- message_segment=segments,
- reply=reply_to_message,
- is_head=True,
- is_emoji=False,
- thinking_start_time=time.time(),
- )
-
- # 处理消息
- await message.process()
-
- # 不知道有什么用,先留下来了,和之前那套sender一样
- _message_json = message.to_dict()
-
- # 发送消息
- message_set = MessageSet(chat_stream, message_id)
- message_set.add_message(message)
- await message_manager.add_message(message_set)
- await self.storage.store_message(message, chat_stream)
- logger.info(f"[私聊][{self.private_name}]PFC消息已发送: {content}")
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]PFC消息发送失败: {str(e)}")
- raise
diff --git a/src/experimental/PFC/message_storage.py b/src/experimental/PFC/message_storage.py
deleted file mode 100644
index 2505a06f..00000000
--- a/src/experimental/PFC/message_storage.py
+++ /dev/null
@@ -1,131 +0,0 @@
-from abc import ABC, abstractmethod
-from typing import List, Dict, Any, Callable
-
-from playhouse import shortcuts
-
-from src.common.database.database_model import Messages # Peewee Messages 模型导入
-
-model_to_dict: Callable[..., dict] = shortcuts.model_to_dict # Peewee 模型转换为字典的快捷函数
-
-
-class MessageStorage(ABC):
- """消息存储接口"""
-
- @abstractmethod
- async def get_messages_after(self, chat_id: str, message: Dict[str, Any]) -> List[Dict[str, Any]]:
- """获取指定消息ID之后的所有消息
-
- Args:
- chat_id: 聊天ID
- message: 消息
-
- Returns:
- List[Dict[str, Any]]: 消息列表
- """
- pass
-
- @abstractmethod
- async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
- """获取指定时间点之前的消息
-
- Args:
- chat_id: 聊天ID
- time_point: 时间戳
- limit: 最大消息数量
-
- Returns:
- List[Dict[str, Any]]: 消息列表
- """
- pass
-
- @abstractmethod
- async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
- """检查是否有新消息
-
- Args:
- chat_id: 聊天ID
- after_time: 时间戳
-
- Returns:
- bool: 是否有新消息
- """
- pass
-
-
-class PeeweeMessageStorage(MessageStorage):
- """Peewee消息存储实现"""
-
- async def get_messages_after(self, chat_id: str, message_time: float) -> List[Dict[str, Any]]:
- query = (
- Messages.select()
- .where((Messages.chat_id == chat_id) & (Messages.time > message_time))
- .order_by(Messages.time.asc())
- )
-
- # print(f"storage_check_message: {message_time}")
- messages_models = list(query)
- return [model_to_dict(msg) for msg in messages_models]
-
- async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
- query = (
- Messages.select()
- .where((Messages.chat_id == chat_id) & (Messages.time < time_point))
- .order_by(Messages.time.desc())
- .limit(limit)
- )
-
- messages_models = list(query)
- # 将消息按时间正序排列
- messages_models.reverse()
- return [model_to_dict(msg) for msg in messages_models]
-
- async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
- return Messages.select().where((Messages.chat_id == chat_id) & (Messages.time > after_time)).exists()
-
-
-# # 创建一个内存消息存储实现,用于测试
-# class InMemoryMessageStorage(MessageStorage):
-# """内存消息存储实现,主要用于测试"""
-
-# def __init__(self):
-# self.messages: Dict[str, List[Dict[str, Any]]] = {}
-
-# async def get_messages_after(self, chat_id: str, message_id: Optional[str] = None) -> List[Dict[str, Any]]:
-# if chat_id not in self.messages:
-# return []
-
-# messages = self.messages[chat_id]
-# if not message_id:
-# return messages
-
-# # 找到message_id的索引
-# try:
-# index = next(i for i, m in enumerate(messages) if m["message_id"] == message_id)
-# return messages[index + 1:]
-# except StopIteration:
-# return []
-
-# async def get_messages_before(self, chat_id: str, time_point: float, limit: int = 5) -> List[Dict[str, Any]]:
-# if chat_id not in self.messages:
-# return []
-
-# messages = [
-# m for m in self.messages[chat_id]
-# if m["time"] < time_point
-# ]
-
-# return messages[-limit:]
-
-# async def has_new_messages(self, chat_id: str, after_time: float) -> bool:
-# if chat_id not in self.messages:
-# return False
-
-# return any(m["time"] > after_time for m in self.messages[chat_id])
-
-# # 测试辅助方法
-# def add_message(self, chat_id: str, message: Dict[str, Any]):
-# """添加测试消息"""
-# if chat_id not in self.messages:
-# self.messages[chat_id] = []
-# self.messages[chat_id].append(message)
-# self.messages[chat_id].sort(key=lambda m: m["time"])
diff --git a/src/experimental/PFC/observation_info.py b/src/experimental/PFC/observation_info.py
deleted file mode 100644
index 5a7d72da..00000000
--- a/src/experimental/PFC/observation_info.py
+++ /dev/null
@@ -1,389 +0,0 @@
-from typing import List, Optional, Dict, Any, Set
-from maim_message import UserInfo
-import time
-from src.common.logger import get_logger
-from src.experimental.PFC.chat_observer import ChatObserver
-from src.experimental.PFC.chat_states import NotificationHandler, NotificationType, Notification
-from src.chat.utils.chat_message_builder import build_readable_messages
-import traceback # 导入 traceback 用于调试
-
-logger = get_logger("observation_info")
-
-
-class ObservationInfoHandler(NotificationHandler):
- """ObservationInfo的通知处理器"""
-
- def __init__(self, observation_info: "ObservationInfo", private_name: str):
- """初始化处理器
-
- Args:
- observation_info: 要更新的ObservationInfo实例
- private_name: 私聊对象的名称,用于日志记录
- """
- self.observation_info = observation_info
- # 将 private_name 存储在 handler 实例中
- self.private_name = private_name
-
- async def handle_notification(self, notification: Notification): # 添加类型提示
- # 获取通知类型和数据
- notification_type = notification.type
- data = notification.data
-
- try: # 添加错误处理块
- if notification_type == NotificationType.NEW_MESSAGE:
- # 处理新消息通知
- # logger.debug(f"[私聊][{self.private_name}]收到新消息通知data: {data}") # 可以在需要时取消注释
- message_id = data.get("message_id")
- processed_plain_text = data.get("processed_plain_text")
- detailed_plain_text = data.get("detailed_plain_text")
- user_info_dict = data.get("user_info") # 先获取字典
- time_value = data.get("time")
-
- # 确保 user_info 是字典类型再创建 UserInfo 对象
- user_info = None
- if isinstance(user_info_dict, dict):
- try:
- user_info = UserInfo.from_dict(user_info_dict)
- except Exception as e:
- logger.error(
- f"[私聊][{self.private_name}]从字典创建 UserInfo 时出错: {e}, 字典内容: {user_info_dict}"
- )
- # 可以选择在这里返回或记录错误,避免后续代码出错
- return
- elif user_info_dict is not None:
- logger.warning(
- f"[私聊][{self.private_name}]收到的 user_info 不是预期的字典类型: {type(user_info_dict)}"
- )
- # 根据需要处理非字典情况,这里暂时返回
- return
-
- message = {
- "message_id": message_id,
- "processed_plain_text": processed_plain_text,
- "detailed_plain_text": detailed_plain_text,
- "user_info": user_info_dict, # 存储原始字典或 UserInfo 对象,取决于你的 update_from_message 如何处理
- "time": time_value,
- }
- # 传递 UserInfo 对象(如果成功创建)或原始字典
- await self.observation_info.update_from_message(message, user_info) # 修改:传递 user_info 对象
-
- elif notification_type == NotificationType.COLD_CHAT:
- # 处理冷场通知
- is_cold = data.get("is_cold", False)
- await self.observation_info.update_cold_chat_status(is_cold, time.time()) # 修改:改为 await 调用
-
- elif notification_type == NotificationType.ACTIVE_CHAT:
- # 处理活跃通知 (通常由 COLD_CHAT 的反向状态处理)
- is_active = data.get("is_active", False)
- self.observation_info.is_cold = not is_active
-
- elif notification_type == NotificationType.BOT_SPEAKING:
- # 处理机器人说话通知 (按需实现)
- self.observation_info.is_typing = False
- self.observation_info.last_bot_speak_time = time.time()
-
- elif notification_type == NotificationType.USER_SPEAKING:
- # 处理用户说话通知
- self.observation_info.is_typing = False
- self.observation_info.last_user_speak_time = time.time()
-
- elif notification_type == NotificationType.MESSAGE_DELETED:
- # 处理消息删除通知
- message_id = data.get("message_id")
- # 从 unprocessed_messages 中移除被删除的消息
- original_count = len(self.observation_info.unprocessed_messages)
- self.observation_info.unprocessed_messages = [
- msg for msg in self.observation_info.unprocessed_messages if msg.get("message_id") != message_id
- ]
- if len(self.observation_info.unprocessed_messages) < original_count:
- logger.info(f"[私聊][{self.private_name}]移除了未处理的消息 (ID: {message_id})")
-
- elif notification_type == NotificationType.USER_JOINED:
- # 处理用户加入通知 (如果适用私聊场景)
- user_id = data.get("user_id")
- if user_id:
- self.observation_info.active_users.add(str(user_id)) # 确保是字符串
-
- elif notification_type == NotificationType.USER_LEFT:
- # 处理用户离开通知 (如果适用私聊场景)
- user_id = data.get("user_id")
- if user_id:
- self.observation_info.active_users.discard(str(user_id)) # 确保是字符串
-
- elif notification_type == NotificationType.ERROR:
- # 处理错误通知
- error_msg = data.get("error", "未提供错误信息")
- logger.error(f"[私聊][{self.private_name}]收到错误通知: {error_msg}")
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]处理通知时发生错误: {e}")
- logger.error(traceback.format_exc()) # 打印详细堆栈信息
-
-
-# @dataclass <-- 这个,不需要了(递黄瓜)
-class ObservationInfo:
- """决策信息类,用于收集和管理来自chat_observer的通知信息 (手动实现 __init__)"""
-
- # 类型提示保留,可用于文档和静态分析
- private_name: str
- chat_history: List[Dict[str, Any]]
- chat_history_str: str
- unprocessed_messages: List[Dict[str, Any]]
- active_users: Set[str]
- last_bot_speak_time: Optional[float]
- last_user_speak_time: Optional[float]
- last_message_time: Optional[float]
- last_message_id: Optional[str]
- last_message_content: str
- last_message_sender: Optional[str]
- bot_id: Optional[str]
- chat_history_count: int
- new_messages_count: int
- cold_chat_start_time: Optional[float]
- cold_chat_duration: float
- is_typing: bool
- is_cold_chat: bool
- changed: bool
- chat_observer: Optional[ChatObserver]
- handler: Optional[ObservationInfoHandler]
-
- def __init__(self, private_name: str):
- """
- 手动初始化 ObservationInfo 的所有实例变量。
- """
-
- # 接收的参数
- self.private_name: str = private_name
-
- # data_list
- self.chat_history: List[Dict[str, Any]] = []
- self.chat_history_str: str = ""
- self.unprocessed_messages: List[Dict[str, Any]] = []
- self.active_users: Set[str] = set()
-
- # data
- self.last_bot_speak_time: Optional[float] = None
- self.last_user_speak_time: Optional[float] = None
- self.last_message_time: Optional[float] = None
- self.last_message_id: Optional[str] = None
- self.last_message_content: str = ""
- self.last_message_sender: Optional[str] = None
- self.bot_id: Optional[str] = None
- self.chat_history_count: int = 0
- self.new_messages_count: int = 0
- self.cold_chat_start_time: Optional[float] = None
- self.cold_chat_duration: float = 0.0
-
- # state
- self.is_typing: bool = False
- self.is_cold_chat: bool = False
- self.changed: bool = False
-
- # 关联对象
- self.chat_observer: Optional[ChatObserver] = None
-
- self.handler: ObservationInfoHandler = ObservationInfoHandler(self, self.private_name)
-
- def bind_to_chat_observer(self, chat_observer: ChatObserver):
- """绑定到指定的chat_observer
-
- Args:
- chat_observer: 要绑定的 ChatObserver 实例
- """
- if self.chat_observer:
- logger.warning(f"[私聊][{self.private_name}]尝试重复绑定 ChatObserver")
- return
-
- self.chat_observer = chat_observer
- try:
- if not self.handler: # 确保 handler 已经被创建
- logger.error(f"[私聊][{self.private_name}] 尝试绑定时 handler 未初始化!")
- self.chat_observer = None # 重置,防止后续错误
- return
-
- # 注册关心的通知类型
- self.chat_observer.notification_manager.register_handler(
- target="observation_info", notification_type=NotificationType.NEW_MESSAGE, handler=self.handler
- )
- self.chat_observer.notification_manager.register_handler(
- target="observation_info", notification_type=NotificationType.COLD_CHAT, handler=self.handler
- )
- # 可以根据需要注册更多通知类型
- # self.chat_observer.notification_manager.register_handler(
- # target="observation_info", notification_type=NotificationType.MESSAGE_DELETED, handler=self.handler
- # )
- logger.info(f"[私聊][{self.private_name}]成功绑定到 ChatObserver")
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]绑定到 ChatObserver 时出错: {e}")
- self.chat_observer = None # 绑定失败,重置
-
- def unbind_from_chat_observer(self):
- """解除与chat_observer的绑定"""
- if (
- self.chat_observer and hasattr(self.chat_observer, "notification_manager") and self.handler
- ): # 增加 handler 检查
- try:
- self.chat_observer.notification_manager.unregister_handler(
- target="observation_info", notification_type=NotificationType.NEW_MESSAGE, handler=self.handler
- )
- self.chat_observer.notification_manager.unregister_handler(
- target="observation_info", notification_type=NotificationType.COLD_CHAT, handler=self.handler
- )
- # 如果注册了其他类型,也要在这里注销
- # self.chat_observer.notification_manager.unregister_handler(
- # target="observation_info", notification_type=NotificationType.MESSAGE_DELETED, handler=self.handler
- # )
- logger.info(f"[私聊][{self.private_name}]成功从 ChatObserver 解绑")
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]从 ChatObserver 解绑时出错: {e}")
- finally: # 确保 chat_observer 被重置
- self.chat_observer = None
- else:
- logger.warning(f"[私聊][{self.private_name}]尝试解绑时 ChatObserver 不存在、无效或 handler 未设置")
-
- # 修改:update_from_message 接收 UserInfo 对象
- async def update_from_message(self, message: Dict[str, Any], user_info: Optional[UserInfo]):
- """从消息更新信息
-
- Args:
- message: 消息数据字典
- user_info: 解析后的 UserInfo 对象 (可能为 None)
- """
- message_time = message.get("time")
- message_id = message.get("message_id")
- processed_text = message.get("processed_plain_text", "")
-
- # 只有在新消息到达时才更新 last_message 相关信息
- if message_time and message_time > (self.last_message_time or 0):
- self.last_message_time = message_time
- self.last_message_id = message_id
- self.last_message_content = processed_text
- # 重置冷场计时器
- self.is_cold_chat = False
- self.cold_chat_start_time = None
- self.cold_chat_duration = 0.0
-
- if user_info:
- sender_id = str(user_info.user_id) # 确保是字符串
- self.last_message_sender = sender_id
- # 更新发言时间
- if sender_id == self.bot_id:
- self.last_bot_speak_time = message_time
- else:
- self.last_user_speak_time = message_time
- self.active_users.add(sender_id) # 用户发言则认为其活跃
- else:
- logger.warning(
- f"[私聊][{self.private_name}]处理消息更新时缺少有效的 UserInfo 对象, message_id: {message_id}"
- )
- self.last_message_sender = None # 发送者未知
-
- # 将原始消息字典添加到未处理列表
- self.unprocessed_messages.append(message)
- self.new_messages_count = len(self.unprocessed_messages) # 直接用列表长度
-
- # logger.debug(f"[私聊][{self.private_name}]消息更新: last_time={self.last_message_time}, new_count={self.new_messages_count}")
- self.update_changed() # 标记状态已改变
- else:
- # 如果消息时间戳不是最新的,可能不需要处理,或者记录一个警告
- pass
- # logger.warning(f"[私聊][{self.private_name}]收到过时或无效时间戳的消息: ID={message_id}, time={message_time}")
-
- def update_changed(self):
- """标记状态已改变,并重置标记"""
- # logger.debug(f"[私聊][{self.private_name}]状态标记为已改变 (changed=True)")
- self.changed = True
-
- async def update_cold_chat_status(self, is_cold: bool, current_time: float):
- """更新冷场状态
-
- Args:
- is_cold: 是否处于冷场状态
- current_time: 当前时间戳
- """
- if is_cold != self.is_cold_chat: # 仅在状态变化时更新
- self.is_cold_chat = is_cold
- if is_cold:
- # 进入冷场状态
- self.cold_chat_start_time = (
- self.last_message_time or current_time
- ) # 从最后消息时间开始算,或从当前时间开始
- logger.info(f"[私聊][{self.private_name}]进入冷场状态,开始时间: {self.cold_chat_start_time}")
- else:
- # 结束冷场状态
- if self.cold_chat_start_time:
- self.cold_chat_duration = current_time - self.cold_chat_start_time
- logger.info(f"[私聊][{self.private_name}]结束冷场状态,持续时间: {self.cold_chat_duration:.2f} 秒")
- self.cold_chat_start_time = None # 重置开始时间
- self.update_changed() # 状态变化,标记改变
-
- # 即使状态没变,如果是冷场状态,也更新持续时间
- if self.is_cold_chat and self.cold_chat_start_time:
- self.cold_chat_duration = current_time - self.cold_chat_start_time
-
- def get_active_duration(self) -> float:
- """获取当前活跃时长 (距离最后一条消息的时间)
-
- Returns:
- float: 最后一条消息到现在的时长(秒)
- """
- if not self.last_message_time:
- return 0.0
- return time.time() - self.last_message_time
-
- def get_user_response_time(self) -> Optional[float]:
- """获取用户最后响应时间 (距离用户最后发言的时间)
-
- Returns:
- Optional[float]: 用户最后发言到现在的时长(秒),如果没有用户发言则返回None
- """
- if not self.last_user_speak_time:
- return None
- return time.time() - self.last_user_speak_time
-
- def get_bot_response_time(self) -> Optional[float]:
- """获取机器人最后响应时间 (距离机器人最后发言的时间)
-
- Returns:
- Optional[float]: 机器人最后发言到现在的时长(秒),如果没有机器人发言则返回None
- """
- if not self.last_bot_speak_time:
- return None
- return time.time() - self.last_bot_speak_time
-
- async def clear_unprocessed_messages(self):
- """将未处理消息移入历史记录,并更新相关状态"""
- if not self.unprocessed_messages:
- return # 没有未处理消息,直接返回
-
- # logger.debug(f"[私聊][{self.private_name}]处理 {len(self.unprocessed_messages)} 条未处理消息...")
- # 将未处理消息添加到历史记录中 (确保历史记录有长度限制,避免无限增长)
- max_history_len = 100 # 示例:最多保留100条历史记录
- self.chat_history.extend(self.unprocessed_messages)
- if len(self.chat_history) > max_history_len:
- self.chat_history = self.chat_history[-max_history_len:]
-
- # 更新历史记录字符串 (只使用最近一部分生成,例如20条)
- history_slice_for_str = self.chat_history[-20:]
- try:
- self.chat_history_str = build_readable_messages(
- history_slice_for_str,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0, # read_mark 可能需要根据逻辑调整
- )
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]构建聊天记录字符串时出错: {e}")
- self.chat_history_str = "[构建聊天记录出错]" # 提供错误提示
-
- # 清空未处理消息列表和计数
- # cleared_count = len(self.unprocessed_messages)
- self.unprocessed_messages.clear()
- self.new_messages_count = 0
- # self.has_unread_messages = False # 这个状态可以通过 new_messages_count 判断
-
- self.chat_history_count = len(self.chat_history) # 更新历史记录总数
- # logger.debug(f"[私聊][{self.private_name}]已处理 {cleared_count} 条消息,当前历史记录 {self.chat_history_count} 条。")
-
- self.update_changed() # 状态改变
diff --git a/src/experimental/PFC/pfc.py b/src/experimental/PFC/pfc.py
deleted file mode 100644
index 4050ae58..00000000
--- a/src/experimental/PFC/pfc.py
+++ /dev/null
@@ -1,346 +0,0 @@
-from typing import List, Tuple, TYPE_CHECKING
-from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.experimental.PFC.chat_observer import ChatObserver
-from src.experimental.PFC.pfc_utils import get_items_from_json
-from src.individuality.individuality import get_individuality
-from src.experimental.PFC.conversation_info import ConversationInfo
-from src.experimental.PFC.observation_info import ObservationInfo
-from src.chat.utils.chat_message_builder import build_readable_messages
-from rich.traceback import install
-
-install(extra_lines=3)
-
-if TYPE_CHECKING:
- pass
-
-logger = get_logger("pfc")
-
-
-def _calculate_similarity(goal1: str, goal2: str) -> float:
- """简单计算两个目标之间的相似度
-
- 这里使用一个简单的实现,实际可以使用更复杂的文本相似度算法
-
- Args:
- goal1: 第一个目标
- goal2: 第二个目标
-
- Returns:
- float: 相似度得分 (0-1)
- """
- # 简单实现:检查重叠字数比例
- words1 = set(goal1)
- words2 = set(goal2)
- overlap = len(words1.intersection(words2))
- total = len(words1.union(words2))
- return overlap / total if total > 0 else 0
-
-
-class GoalAnalyzer:
- """对话目标分析器"""
-
- def __init__(self, stream_id: str, private_name: str):
- # TODO: API-Adapter修改标记
- self.llm = LLMRequest(
- model=global_config.model.utils, temperature=0.7, max_tokens=1000, request_type="conversation_goal"
- )
-
- self.personality_info = get_individuality().get_prompt(x_person=2, level=3)
- self.name = global_config.bot.nickname
- self.nick_name = global_config.bot.alias_names
- self.private_name = private_name
- self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
-
- # 多目标存储结构
- self.goals = [] # 存储多个目标
- self.max_goals = 3 # 同时保持的最大目标数量
- self.current_goal_and_reason = None
-
- async def analyze_goal(self, conversation_info: ConversationInfo, observation_info: ObservationInfo):
- """分析对话历史并设定目标
-
- Args:
- conversation_info: 对话信息
- observation_info: 观察信息
-
- Returns:
- Tuple[str, str, str]: (目标, 方法, 原因)
- """
- # 构建对话目标
- goals_str = ""
- if conversation_info.goal_list:
- for goal_reason in conversation_info.goal_list:
- if isinstance(goal_reason, dict):
- goal = goal_reason.get("goal", "目标内容缺失")
- reasoning = goal_reason.get("reasoning", "没有明确原因")
- else:
- goal = str(goal_reason)
- reasoning = "没有明确原因"
-
- goal_str = f"目标:{goal},产生该对话目标的原因:{reasoning}\n"
- goals_str += goal_str
- else:
- goal = "目前没有明确对话目标"
- reasoning = "目前没有明确对话目标,最好思考一个对话目标"
- goals_str = f"目标:{goal},产生该对话目标的原因:{reasoning}\n"
-
- # 获取聊天历史记录
- chat_history_text = observation_info.chat_history_str
-
- if observation_info.new_messages_count > 0:
- new_messages_list = observation_info.unprocessed_messages
- new_messages_str = build_readable_messages(
- new_messages_list,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
- chat_history_text += f"\n--- 以下是 {observation_info.new_messages_count} 条新消息 ---\n{new_messages_str}"
-
- # await observation_info.clear_unprocessed_messages()
-
- persona_text = f"你的名字是{self.name},{self.personality_info}。"
- # 构建action历史文本
- action_history_list = conversation_info.done_action
- action_history_text = "你之前做的事情是:"
- for action in action_history_list:
- action_history_text += f"{action}\n"
-
- prompt = f"""{persona_text}。现在你在参与一场QQ聊天,请分析以下聊天记录,并根据你的性格特征确定多个明确的对话目标。
-这些目标应该反映出对话的不同方面和意图。
-
-{action_history_text}
-当前对话目标:
-{goals_str}
-
-聊天记录:
-{chat_history_text}
-
-请分析当前对话并确定最适合的对话目标。你可以:
-1. 保持现有目标不变
-2. 修改现有目标
-3. 添加新目标
-4. 删除不再相关的目标
-5. 如果你想结束对话,请设置一个目标,目标goal为"结束对话",原因reasoning为你希望结束对话
-
-请以JSON数组格式输出当前的所有对话目标,每个目标包含以下字段:
-1. goal: 对话目标(简短的一句话)
-2. reasoning: 对话原因,为什么设定这个目标(简要解释)
-
-输出格式示例:
-[
-{{
- "goal": "回答用户关于Python编程的具体问题",
- "reasoning": "用户提出了关于Python的技术问题,需要专业且准确的解答"
-}},
-{{
- "goal": "回答用户关于python安装的具体问题",
- "reasoning": "用户提出了关于Python的技术问题,需要专业且准确的解答"
-}}
-]"""
-
- logger.debug(f"[私聊][{self.private_name}]发送到LLM的提示词: {prompt}")
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}")
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]分析对话目标时出错: {str(e)}")
- content = ""
-
- # 使用改进后的get_items_from_json函数处理JSON数组
- success, result = get_items_from_json(
- content,
- self.private_name,
- "goal",
- "reasoning",
- required_types={"goal": str, "reasoning": str},
- allow_array=True,
- )
-
- if success:
- # 判断结果是单个字典还是字典列表
- if isinstance(result, list):
- # 清空现有目标列表并添加新目标
- conversation_info.goal_list = []
- for item in result:
- conversation_info.goal_list.append(item)
-
- # 返回第一个目标作为当前主要目标(如果有)
- if result:
- first_goal = result[0]
- return first_goal.get("goal", ""), "", first_goal.get("reasoning", "")
- else:
- # 单个目标的情况
- conversation_info.goal_list.append(result)
- return goal, "", reasoning
-
- # 如果解析失败,返回默认值
- return "", "", ""
-
- async def _update_goals(self, new_goal: str, method: str, reasoning: str):
- """更新目标列表
-
- Args:
- new_goal: 新的目标
- method: 实现目标的方法
- reasoning: 目标的原因
- """
- # 检查新目标是否与现有目标相似
- for i, (existing_goal, _, _) in enumerate(self.goals):
- if _calculate_similarity(new_goal, existing_goal) > 0.7: # 相似度阈值
- # 更新现有目标
- self.goals[i] = (new_goal, method, reasoning)
- # 将此目标移到列表前面(最主要的位置)
- self.goals.insert(0, self.goals.pop(i))
- return
-
- # 添加新目标到列表前面
- self.goals.insert(0, (new_goal, method, reasoning))
-
- # 限制目标数量
- if len(self.goals) > self.max_goals:
- self.goals.pop() # 移除最老的目标
-
- async def get_all_goals(self) -> List[Tuple[str, str, str]]:
- """获取所有当前目标
-
- Returns:
- List[Tuple[str, str, str]]: 目标列表,每项为(目标, 方法, 原因)
- """
- return self.goals.copy()
-
- async def get_alternative_goals(self) -> List[Tuple[str, str, str]]:
- """获取除了当前主要目标外的其他备选目标
-
- Returns:
- List[Tuple[str, str, str]]: 备选目标列表
- """
- if len(self.goals) <= 1:
- return []
- return self.goals[1:].copy()
-
- async def analyze_conversation(self, goal, reasoning):
- messages = self.chat_observer.get_cached_messages()
- chat_history_text = build_readable_messages(
- messages,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
-
- persona_text = f"你的名字是{self.name},{self.personality_info}。"
- # ===> Persona 文本构建结束 <===
-
- # --- 修改 Prompt 字符串,使用 persona_text ---
- prompt = f"""{persona_text}。现在你在参与一场QQ聊天,
- 当前对话目标:{goal}
- 产生该对话目标的原因:{reasoning}
-
- 请分析以下聊天记录,并根据你的性格特征评估该目标是否已经达到,或者你是否希望停止该次对话。
- 聊天记录:
- {chat_history_text}
- 请以JSON格式输出,包含以下字段:
- 1. goal_achieved: 对话目标是否已经达到(true/false)
- 2. stop_conversation: 是否希望停止该次对话(true/false)
- 3. reason: 为什么希望停止该次对话(简要解释)
-
-输出格式示例:
-{{
- "goal_achieved": true,
- "stop_conversation": false,
- "reason": "虽然目标已达成,但对话仍然有继续的价值"
-}}"""
-
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"[私聊][{self.private_name}]LLM原始返回内容: {content}")
-
- # 尝试解析JSON
- success, result = get_items_from_json(
- content,
- self.private_name,
- "goal_achieved",
- "stop_conversation",
- "reason",
- required_types={"goal_achieved": bool, "stop_conversation": bool, "reason": str},
- )
-
- if not success:
- logger.error(f"[私聊][{self.private_name}]无法解析对话分析结果JSON")
- return False, False, "解析结果失败"
-
- goal_achieved = result["goal_achieved"]
- stop_conversation = result["stop_conversation"]
- reason = result["reason"]
-
- return goal_achieved, stop_conversation, reason
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]分析对话状态时出错: {str(e)}")
- return False, False, f"分析出错: {str(e)}"
-
-
-# 先注释掉,万一以后出问题了还能开回来(((
-# class DirectMessageSender:
-# """直接发送消息到平台的发送器"""
-
-# def __init__(self, private_name: str):
-# self.logger = get_logger("direct_sender")
-# self.storage = MessageStorage()
-# self.private_name = private_name
-
-# async def send_via_ws(self, message: MessageSending) -> None:
-# try:
-# await get_global_api().send_message(message)
-# except Exception as e:
-# raise ValueError(f"未找到平台:{message.message_info.platform} 的url配置,请检查配置文件") from e
-
-# async def send_message(
-# self,
-# chat_stream: ChatStream,
-# content: str,
-# reply_to_message: Optional[Message] = None,
-# ) -> None:
-# """直接发送消息到平台
-
-# Args:
-# chat_stream: 聊天流
-# content: 消息内容
-# reply_to_message: 要回复的消息
-# """
-# # 构建消息对象
-# message_segment = Seg(type="text", data=content)
-# bot_user_info = UserInfo(
-# user_id=global_config.BOT_QQ,
-# user_nickname=global_config.bot.nickname,
-# platform=chat_stream.platform,
-# )
-
-# message = MessageSending(
-# message_id=f"dm{round(time.time(), 2)}",
-# chat_stream=chat_stream,
-# bot_user_info=bot_user_info,
-# sender_info=reply_to_message.message_info.user_info if reply_to_message else None,
-# message_segment=message_segment,
-# reply=reply_to_message,
-# is_head=True,
-# is_emoji=False,
-# thinking_start_time=time.time(),
-# )
-
-# # 处理消息
-# await message.process()
-
-# _message_json = message.to_dict()
-
-# # 发送消息
-# try:
-# await self.send_via_ws(message)
-# await self.storage.store_message(message, chat_stream)
-# logger.info(f"[私聊][{self.private_name}]PFC消息已发送: {content}")
-# except Exception as e:
-# logger.error(f"[私聊][{self.private_name}]PFC消息发送失败: {str(e)}")
diff --git a/src/experimental/PFC/pfc_KnowledgeFetcher.py b/src/experimental/PFC/pfc_KnowledgeFetcher.py
deleted file mode 100644
index 38a6dafb..00000000
--- a/src/experimental/PFC/pfc_KnowledgeFetcher.py
+++ /dev/null
@@ -1,86 +0,0 @@
-from typing import List, Tuple
-from src.common.logger import get_logger
-from src.chat.memory_system.Hippocampus import hippocampus_manager
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.chat.message_receive.message import Message
-from src.chat.knowledge.knowledge_lib import qa_manager
-from src.chat.utils.chat_message_builder import build_readable_messages
-
-logger = get_logger("knowledge_fetcher")
-
-
-class KnowledgeFetcher:
- """知识调取器"""
-
- def __init__(self, private_name: str):
- # TODO: API-Adapter修改标记
- self.llm = LLMRequest(
- model=global_config.model.utils,
- temperature=global_config.model.utils["temp"],
- max_tokens=1000,
- request_type="knowledge_fetch",
- )
- self.private_name = private_name
-
- def _lpmm_get_knowledge(self, query: str) -> str:
- """获取相关知识
-
- Args:
- query: 查询内容
-
- Returns:
- str: 构造好的,带相关度的知识
- """
-
- logger.debug(f"[私聊][{self.private_name}]正在从LPMM知识库中获取知识")
- try:
- knowledge_info = qa_manager.get_knowledge(query)
- logger.debug(f"[私聊][{self.private_name}]LPMM知识库查询结果: {knowledge_info:150}")
- return knowledge_info
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]LPMM知识库搜索工具执行失败: {str(e)}")
- return "未找到匹配的知识"
-
- async def fetch(self, query: str, chat_history: List[Message]) -> Tuple[str, str]:
- """获取相关知识
-
- Args:
- query: 查询内容
- chat_history: 聊天历史
-
- Returns:
- Tuple[str, str]: (获取的知识, 知识来源)
- """
- # 构建查询上下文
- chat_history_text = build_readable_messages(
- chat_history,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
-
- # 从记忆中获取相关知识
- related_memory = await hippocampus_manager.get_memory_from_text(
- text=f"{query}\n{chat_history_text}",
- max_memory_num=3,
- max_memory_length=2,
- max_depth=3,
- fast_retrieval=False,
- )
- knowledge_text = ""
- sources_text = "无记忆匹配" # 默认值
- if related_memory:
- sources = []
- for memory in related_memory:
- knowledge_text += memory[1] + "\n"
- sources.append(f"记忆片段{memory[0]}")
- knowledge_text = knowledge_text.strip()
- sources_text = ",".join(sources)
-
- knowledge_text += "\n现在有以下**知识**可供参考:\n "
- knowledge_text += self._lpmm_get_knowledge(query)
- knowledge_text += "\n请记住这些**知识**,并根据**知识**回答问题。\n"
-
- return knowledge_text or "未找到相关知识", sources_text or "无记忆匹配"
diff --git a/src/experimental/PFC/pfc_manager.py b/src/experimental/PFC/pfc_manager.py
deleted file mode 100644
index 174be78b..00000000
--- a/src/experimental/PFC/pfc_manager.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import time
-from typing import Dict, Optional
-from src.common.logger import get_logger
-from .conversation import Conversation
-import traceback
-
-logger = get_logger("pfc_manager")
-
-
-class PFCManager:
- """PFC对话管理器,负责管理所有对话实例"""
-
- # 单例模式
- _instance = None
-
- # 会话实例管理
- _instances: Dict[str, Conversation] = {}
- _initializing: Dict[str, bool] = {}
-
- @classmethod
- def get_instance(cls) -> "PFCManager":
- """获取管理器单例
-
- Returns:
- PFCManager: 管理器实例
- """
- if cls._instance is None:
- cls._instance = PFCManager()
- return cls._instance
-
- async def get_or_create_conversation(self, stream_id: str, private_name: str) -> Optional[Conversation]:
- """获取或创建对话实例
-
- Args:
- stream_id: 聊天流ID
- private_name: 私聊名称
-
- Returns:
- Optional[Conversation]: 对话实例,创建失败则返回None
- """
- # 检查是否已经有实例
- if stream_id in self._initializing and self._initializing[stream_id]:
- logger.debug(f"[私聊][{private_name}]会话实例正在初始化中: {stream_id}")
- return None
-
- if stream_id in self._instances and self._instances[stream_id].should_continue:
- logger.debug(f"[私聊][{private_name}]使用现有会话实例: {stream_id}")
- return self._instances[stream_id]
- if stream_id in self._instances:
- instance = self._instances[stream_id]
- if (
- hasattr(instance, "ignore_until_timestamp")
- and instance.ignore_until_timestamp
- and time.time() < instance.ignore_until_timestamp
- ):
- logger.debug(f"[私聊][{private_name}]会话实例当前处于忽略状态: {stream_id}")
- # 返回 None 阻止交互。或者可以返回实例但标记它被忽略了喵?
- # 还是返回 None 吧喵。
- return None
-
- # 检查 should_continue 状态
- if instance.should_continue:
- logger.debug(f"[私聊][{private_name}]使用现有会话实例: {stream_id}")
- return instance
- # else: 实例存在但不应继续
- try:
- # 创建新实例
- logger.info(f"[私聊][{private_name}]创建新的对话实例: {stream_id}")
- self._initializing[stream_id] = True
- # 创建实例
- conversation_instance = Conversation(stream_id, private_name)
- self._instances[stream_id] = conversation_instance
-
- # 启动实例初始化
- await self._initialize_conversation(conversation_instance)
- except Exception as e:
- logger.error(f"[私聊][{private_name}]创建会话实例失败: {stream_id}, 错误: {e}")
- return None
-
- return conversation_instance
-
- async def _initialize_conversation(self, conversation: Conversation):
- """初始化会话实例
-
- Args:
- conversation: 要初始化的会话实例
- """
- stream_id = conversation.stream_id
- private_name = conversation.private_name
-
- try:
- logger.info(f"[私聊][{private_name}]开始初始化会话实例: {stream_id}")
- # 启动初始化流程
- await conversation._initialize()
-
- # 标记初始化完成
- self._initializing[stream_id] = False
-
- logger.info(f"[私聊][{private_name}]会话实例 {stream_id} 初始化完成")
-
- except Exception as e:
- logger.error(f"[私聊][{private_name}]管理器初始化会话实例失败: {stream_id}, 错误: {e}")
- logger.error(f"[私聊][{private_name}]{traceback.format_exc()}")
- # 清理失败的初始化
-
- async def get_conversation(self, stream_id: str) -> Optional[Conversation]:
- """获取已存在的会话实例
-
- Args:
- stream_id: 聊天流ID
-
- Returns:
- Optional[Conversation]: 会话实例,不存在则返回None
- """
- return self._instances.get(stream_id)
diff --git a/src/experimental/PFC/pfc_types.py b/src/experimental/PFC/pfc_types.py
deleted file mode 100644
index 0ea5eda6..00000000
--- a/src/experimental/PFC/pfc_types.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from enum import Enum
-from typing import Literal
-
-
-class ConversationState(Enum):
- """对话状态"""
-
- INIT = "初始化"
- RETHINKING = "重新思考"
- ANALYZING = "分析历史"
- PLANNING = "规划目标"
- GENERATING = "生成回复"
- CHECKING = "检查回复"
- SENDING = "发送消息"
- FETCHING = "获取知识"
- WAITING = "等待"
- LISTENING = "倾听"
- ENDED = "结束"
- JUDGING = "判断"
- IGNORED = "屏蔽"
-
-
-ActionType = Literal["direct_reply", "fetch_knowledge", "wait"]
diff --git a/src/experimental/PFC/pfc_utils.py b/src/experimental/PFC/pfc_utils.py
deleted file mode 100644
index b9e93ee5..00000000
--- a/src/experimental/PFC/pfc_utils.py
+++ /dev/null
@@ -1,127 +0,0 @@
-import json
-import re
-from typing import Dict, Any, Optional, Tuple, List, Union
-from src.common.logger import get_logger
-
-logger = get_logger("pfc_utils")
-
-
-def get_items_from_json(
- content: str,
- private_name: str,
- *items: str,
- default_values: Optional[Dict[str, Any]] = None,
- required_types: Optional[Dict[str, type]] = None,
- allow_array: bool = True,
-) -> Tuple[bool, Union[Dict[str, Any], List[Dict[str, Any]]]]:
- """从文本中提取JSON内容并获取指定字段
-
- Args:
- content: 包含JSON的文本
- private_name: 私聊名称
- *items: 要提取的字段名
- default_values: 字段的默认值,格式为 {字段名: 默认值}
- required_types: 字段的必需类型,格式为 {字段名: 类型}
- allow_array: 是否允许解析JSON数组
-
- Returns:
- Tuple[bool, Union[Dict[str, Any], List[Dict[str, Any]]]]: (是否成功, 提取的字段字典或字典列表)
- """
- content = content.strip()
- result = {}
-
- # 设置默认值
- if default_values:
- result.update(default_values)
-
- # 首先尝试解析为JSON数组
- if allow_array:
- try:
- # 尝试找到文本中的JSON数组
- array_pattern = r"\[[\s\S]*\]"
- array_match = re.search(array_pattern, content)
- if array_match:
- array_content = array_match.group()
- json_array = json.loads(array_content)
-
- # 确认是数组类型
- if isinstance(json_array, list):
- # 验证数组中的每个项目是否包含所有必需字段
- valid_items = []
- for item in json_array:
- if not isinstance(item, dict):
- continue
-
- # 检查是否有所有必需字段
- if all(field in item for field in items):
- # 验证字段类型
- if required_types:
- type_valid = True
- for field, expected_type in required_types.items():
- if field in item and not isinstance(item[field], expected_type):
- type_valid = False
- break
-
- if not type_valid:
- continue
-
- # 验证字符串字段不为空
- string_valid = True
- for field in items:
- if isinstance(item[field], str) and not item[field].strip():
- string_valid = False
- break
-
- if not string_valid:
- continue
-
- valid_items.append(item)
-
- if valid_items:
- return True, valid_items
- except json.JSONDecodeError:
- logger.debug(f"[私聊][{private_name}]JSON数组解析失败,尝试解析单个JSON对象")
- except Exception as e:
- logger.debug(f"[私聊][{private_name}]尝试解析JSON数组时出错: {str(e)}")
-
- # 尝试解析JSON对象
- try:
- json_data = json.loads(content)
- except json.JSONDecodeError:
- # 如果直接解析失败,尝试查找和提取JSON部分
- json_pattern = r"\{[^{}]*\}"
- json_match = re.search(json_pattern, content)
- if json_match:
- try:
- json_data = json.loads(json_match.group())
- except json.JSONDecodeError:
- logger.error(f"[私聊][{private_name}]提取的JSON内容解析失败")
- return False, result
- else:
- logger.error(f"[私聊][{private_name}]无法在返回内容中找到有效的JSON")
- return False, result
-
- # 提取字段
- for item in items:
- if item in json_data:
- result[item] = json_data[item]
-
- # 验证必需字段
- if not all(item in result for item in items):
- logger.error(f"[私聊][{private_name}]JSON缺少必要字段,实际内容: {json_data}")
- return False, result
-
- # 验证字段类型
- if required_types:
- for field, expected_type in required_types.items():
- if field in result and not isinstance(result[field], expected_type):
- logger.error(f"[私聊][{private_name}]{field} 必须是 {expected_type.__name__} 类型")
- return False, result
-
- # 验证字符串字段不为空
- for field in items:
- if isinstance(result[field], str) and not result[field].strip():
- logger.error(f"[私聊][{private_name}]{field} 不能为空")
- return False, result
-
- return True, result
diff --git a/src/experimental/PFC/reply_checker.py b/src/experimental/PFC/reply_checker.py
deleted file mode 100644
index 78319d00..00000000
--- a/src/experimental/PFC/reply_checker.py
+++ /dev/null
@@ -1,183 +0,0 @@
-import json
-from typing import Tuple, List, Dict, Any
-from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.experimental.PFC.chat_observer import ChatObserver
-from maim_message import UserInfo
-
-logger = get_logger("reply_checker")
-
-
-class ReplyChecker:
- """回复检查器"""
-
- def __init__(self, stream_id: str, private_name: str):
- self.llm = LLMRequest(
- model=global_config.llm_PFC_reply_checker, temperature=0.50, max_tokens=1000, request_type="reply_check"
- )
- self.name = global_config.bot.nickname
- self.private_name = private_name
- self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
- self.max_retries = 3 # 最大重试次数
-
- async def check(
- self, reply: str, goal: str, chat_history: List[Dict[str, Any]], chat_history_text: str, retry_count: int = 0
- ) -> Tuple[bool, str, bool]:
- """检查生成的回复是否合适
-
- Args:
- reply: 生成的回复
- goal: 对话目标
- chat_history: 对话历史记录
- chat_history_text: 对话历史记录文本
- retry_count: 当前重试次数
-
- Returns:
- Tuple[bool, str, bool]: (是否合适, 原因, 是否需要重新规划)
- """
- # 不再从 observer 获取,直接使用传入的 chat_history
- # messages = self.chat_observer.get_cached_messages(limit=20)
- try:
- # 筛选出最近由 Bot 自己发送的消息
- bot_messages = []
- for msg in reversed(chat_history):
- user_info = UserInfo.from_dict(msg.get("user_info", {}))
- if str(user_info.user_id) == str(global_config.bot.qq_account): # 确保比较的是字符串
- bot_messages.append(msg.get("processed_plain_text", ""))
- if len(bot_messages) >= 2: # 只和最近的两条比较
- break
- # 进行比较
- if bot_messages:
- # 可以用简单比较,或者更复杂的相似度库 (如 difflib)
- # 简单比较:是否完全相同
- if reply == bot_messages[0]: # 和最近一条完全一样
- logger.warning(
- f"[私聊][{self.private_name}]ReplyChecker 检测到回复与上一条 Bot 消息完全相同: '{reply}'"
- )
- return (
- False,
- "被逻辑检查拒绝:回复内容与你上一条发言完全相同,可以选择深入话题或寻找其它话题或等待",
- True,
- ) # 不合适,需要返回至决策层
- # 2. 相似度检查 (如果精确匹配未通过)
- import difflib # 导入 difflib 库
-
- # 计算编辑距离相似度,ratio() 返回 0 到 1 之间的浮点数
- similarity_ratio = difflib.SequenceMatcher(None, reply, bot_messages[0]).ratio()
- logger.debug(f"[私聊][{self.private_name}]ReplyChecker - 相似度: {similarity_ratio:.2f}")
-
- # 设置一个相似度阈值
- similarity_threshold = 0.9
- if similarity_ratio > similarity_threshold:
- logger.warning(
- f"[私聊][{self.private_name}]ReplyChecker 检测到回复与上一条 Bot 消息高度相似 (相似度 {similarity_ratio:.2f}): '{reply}'"
- )
- return (
- False,
- f"被逻辑检查拒绝:回复内容与你上一条发言高度相似 (相似度 {similarity_ratio:.2f}),可以选择深入话题或寻找其它话题或等待。",
- True,
- )
-
- except Exception as e:
- import traceback
-
- logger.error(f"[私聊][{self.private_name}]检查回复时出错: 类型={type(e)}, 值={e}")
- logger.error(f"[私聊][{self.private_name}]{traceback.format_exc()}") # 打印详细的回溯信息
-
- prompt = f"""你是一个聊天逻辑检查器,请检查以下回复或消息是否合适:
-
-当前对话目标:{goal}
-最新的对话记录:
-{chat_history_text}
-
-待检查的消息:
-{reply}
-
-请结合聊天记录检查以下几点:
-1. 这条消息是否依然符合当前对话目标和实现方式
-2. 这条消息是否与最新的对话记录保持一致性
-3. 是否存在重复发言,或重复表达同质内容(尤其是只是换一种方式表达了相同的含义)
-4. 这条消息是否包含违规内容(例如血腥暴力,政治敏感等)
-5. 这条消息是否以发送者的角度发言(不要让发送者自己回复自己的消息)
-6. 这条消息是否通俗易懂
-7. 这条消息是否有些多余,例如在对方没有回复的情况下,依然连续多次“消息轰炸”(尤其是已经连续发送3条信息的情况,这很可能不合理,需要着重判断)
-8. 这条消息是否使用了完全没必要的修辞
-9. 这条消息是否逻辑通顺
-10. 这条消息是否太过冗长了(通常私聊的每条消息长度在20字以内,除非特殊情况)
-11. 在连续多次发送消息的情况下,这条消息是否衔接自然,会不会显得奇怪(例如连续两条消息中部分内容重叠)
-
-请以JSON格式输出,包含以下字段:
-1. suitable: 是否合适 (true/false)
-2. reason: 原因说明
-3. need_replan: 是否需要重新决策 (true/false),当你认为此时已经不适合发消息,需要规划其它行动时,设为true
-
-输出格式示例:
-{{
- "suitable": true,
- "reason": "回复符合要求,虽然有可能略微偏离目标,但是整体内容流畅得体",
- "need_replan": false
-}}
-
-注意:请严格按照JSON格式输出,不要包含任何其他内容。"""
-
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"[私聊][{self.private_name}]检查回复的原始返回: {content}")
-
- # 清理内容,尝试提取JSON部分
- content = content.strip()
- try:
- # 尝试直接解析
- result = json.loads(content)
- except json.JSONDecodeError:
- # 如果直接解析失败,尝试查找和提取JSON部分
- import re
-
- json_pattern = r"\{[^{}]*\}"
- json_match = re.search(json_pattern, content)
- if json_match:
- try:
- result = json.loads(json_match.group())
- except json.JSONDecodeError:
- # 如果JSON解析失败,尝试从文本中提取结果
- is_suitable = "不合适" not in content.lower() and "违规" not in content.lower()
- reason = content[:100] if content else "无法解析响应"
- need_replan = "重新规划" in content.lower() or "目标不适合" in content.lower()
- return is_suitable, reason, need_replan
- else:
- # 如果找不到JSON,从文本中判断
- is_suitable = "不合适" not in content.lower() and "违规" not in content.lower()
- reason = content[:100] if content else "无法解析响应"
- need_replan = "重新规划" in content.lower() or "目标不适合" in content.lower()
- return is_suitable, reason, need_replan
-
- # 验证JSON字段
- suitable = result.get("suitable", None)
- reason = result.get("reason", "未提供原因")
- need_replan = result.get("need_replan", False)
-
- # 如果suitable字段是字符串,转换为布尔值
- if isinstance(suitable, str):
- suitable = suitable.lower() == "true"
-
- # 如果suitable字段不存在或不是布尔值,从reason中判断
- if suitable is None:
- suitable = "不合适" not in reason.lower() and "违规" not in reason.lower()
-
- # 如果不合适且未达到最大重试次数,返回需要重试
- if not suitable and retry_count < self.max_retries:
- return False, reason, False
-
- # 如果不合适且已达到最大重试次数,返回需要重新规划
- if not suitable and retry_count >= self.max_retries:
- return False, f"多次重试后仍不合适: {reason}", True
-
- return suitable, reason, need_replan
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]检查回复时出错: {e}")
- # 如果出错且已达到最大重试次数,建议重新规划
- if retry_count >= self.max_retries:
- return False, "多次检查失败,建议重新规划", True
- return False, f"检查过程出错,建议重试: {str(e)}", False
diff --git a/src/experimental/PFC/reply_generator.py b/src/experimental/PFC/reply_generator.py
deleted file mode 100644
index 530eba6c..00000000
--- a/src/experimental/PFC/reply_generator.py
+++ /dev/null
@@ -1,227 +0,0 @@
-from typing import Tuple, List, Dict, Any
-from src.common.logger import get_logger
-from src.llm_models.utils_model import LLMRequest
-from src.config.config import global_config
-from src.experimental.PFC.chat_observer import ChatObserver
-from src.experimental.PFC.reply_checker import ReplyChecker
-from src.individuality.individuality import get_individuality
-from .observation_info import ObservationInfo
-from .conversation_info import ConversationInfo
-from src.chat.utils.chat_message_builder import build_readable_messages
-
-logger = get_logger("reply_generator")
-
-# --- 定义 Prompt 模板 ---
-
-# Prompt for direct_reply (首次回复)
-PROMPT_DIRECT_REPLY = """{persona_text}。现在你在参与一场QQ私聊,请根据以下信息生成一条回复:
-
-当前对话目标:{goals_str}
-
-{knowledge_info_str}
-
-最近的聊天记录:
-{chat_history_text}
-
-
-请根据上述信息,结合聊天记录,回复对方。该回复应该:
-1. 符合对话目标,以"你"的角度发言(不要自己与自己对话!)
-2. 符合你的性格特征和身份细节
-3. 通俗易懂,自然流畅,像正常聊天一样,简短(通常20字以内,除非特殊情况)
-4. 可以适当利用相关知识,但不要生硬引用
-5. 自然、得体,结合聊天记录逻辑合理,且没有重复表达同质内容
-
-请注意把握聊天内容,不要回复的太有条理,可以有个性。请分清"你"和对方说的话,不要把"你"说的话当做对方说的话,这是你自己说的话。
-可以回复得自然随意自然一些,就像真人一样,注意把握聊天内容,整体风格可以平和、简短,不要刻意突出自身学科背景,不要说你说过的话,可以简短,多简短都可以,但是避免冗长。
-请你注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出回复内容。
-不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。
-
-请直接输出回复内容,不需要任何额外格式。"""
-
-# Prompt for send_new_message (追问/补充)
-PROMPT_SEND_NEW_MESSAGE = """{persona_text}。现在你在参与一场QQ私聊,**刚刚你已经发送了一条或多条消息**,现在请根据以下信息再发一条新消息:
-
-当前对话目标:{goals_str}
-
-{knowledge_info_str}
-
-最近的聊天记录:
-{chat_history_text}
-
-
-请根据上述信息,结合聊天记录,继续发一条新消息(例如对之前消息的补充,深入话题,或追问等等)。该消息应该:
-1. 符合对话目标,以"你"的角度发言(不要自己与自己对话!)
-2. 符合你的性格特征和身份细节
-3. 通俗易懂,自然流畅,像正常聊天一样,简短(通常20字以内,除非特殊情况)
-4. 可以适当利用相关知识,但不要生硬引用
-5. 跟之前你发的消息自然的衔接,逻辑合理,且没有重复表达同质内容或部分重叠内容
-
-请注意把握聊天内容,不用太有条理,可以有个性。请分清"你"和对方说的话,不要把"你"说的话当做对方说的话,这是你自己说的话。
-这条消息可以自然随意自然一些,就像真人一样,注意把握聊天内容,整体风格可以平和、简短,不要刻意突出自身学科背景,不要说你说过的话,可以简短,多简短都可以,但是避免冗长。
-请你注意不要输出多余内容(包括前后缀,冒号和引号,括号,表情等),只输出消息内容。
-不要输出多余内容(包括前后缀,冒号和引号,括号,表情包,at或 @等 )。
-
-请直接输出回复内容,不需要任何额外格式。"""
-
-# Prompt for say_goodbye (告别语生成)
-PROMPT_FAREWELL = """{persona_text}。你在参与一场 QQ 私聊,现在对话似乎已经结束,你决定再发一条最后的消息来圆满结束。
-
-最近的聊天记录:
-{chat_history_text}
-
-请根据上述信息,结合聊天记录,构思一条**简短、自然、符合你人设**的最后的消息。
-这条消息应该:
-1. 从你自己的角度发言。
-2. 符合你的性格特征和身份细节。
-3. 通俗易懂,自然流畅,通常很简短。
-4. 自然地为这场对话画上句号,避免开启新话题或显得冗长、刻意。
-
-请像真人一样随意自然,**简洁是关键**。
-不要输出多余内容(包括前后缀、冒号、引号、括号、表情包、at或@等)。
-
-请直接输出最终的告别消息内容,不需要任何额外格式。"""
-
-
-class ReplyGenerator:
- """回复生成器"""
-
- def __init__(self, stream_id: str, private_name: str):
- self.llm = LLMRequest(
- model=global_config.llm_PFC_chat,
- temperature=global_config.llm_PFC_chat["temp"],
- request_type="reply_generation",
- )
- self.personality_info = get_individuality().get_prompt(x_person=2, level=3)
- self.name = global_config.bot.nickname
- self.private_name = private_name
- self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
- self.reply_checker = ReplyChecker(stream_id, private_name)
-
- # 修改 generate 方法签名,增加 action_type 参数
- async def generate(
- self, observation_info: ObservationInfo, conversation_info: ConversationInfo, action_type: str
- ) -> str:
- """生成回复
-
- Args:
- observation_info: 观察信息
- conversation_info: 对话信息
- action_type: 当前执行的动作类型 ('direct_reply' 或 'send_new_message')
-
- Returns:
- str: 生成的回复
- """
- # 构建提示词
- logger.debug(
- f"[私聊][{self.private_name}]开始生成回复 (动作类型: {action_type}):当前目标: {conversation_info.goal_list}"
- )
-
- # --- 构建通用 Prompt 参数 ---
- # (这部分逻辑基本不变)
-
- # 构建对话目标 (goals_str)
- goals_str = ""
- if conversation_info.goal_list:
- for goal_reason in conversation_info.goal_list:
- if isinstance(goal_reason, dict):
- goal = goal_reason.get("goal", "目标内容缺失")
- reasoning = goal_reason.get("reasoning", "没有明确原因")
- else:
- goal = str(goal_reason)
- reasoning = "没有明确原因"
-
- goal = str(goal) if goal is not None else "目标内容缺失"
- reasoning = str(reasoning) if reasoning is not None else "没有明确原因"
- goals_str += f"- 目标:{goal}\n 原因:{reasoning}\n"
- else:
- goals_str = "- 目前没有明确对话目标\n" # 简化无目标情况
-
- # --- 新增:构建知识信息字符串 ---
- knowledge_info_str = "【供参考的相关知识和记忆】\n" # 稍微改下标题,表明是供参考
- try:
- # 检查 conversation_info 是否有 knowledge_list 并且不为空
- if hasattr(conversation_info, "knowledge_list") and conversation_info.knowledge_list:
- # 最多只显示最近的 5 条知识
- recent_knowledge = conversation_info.knowledge_list[-5:]
- for i, knowledge_item in enumerate(recent_knowledge):
- if isinstance(knowledge_item, dict):
- query = knowledge_item.get("query", "未知查询")
- knowledge = knowledge_item.get("knowledge", "无知识内容")
- source = knowledge_item.get("source", "未知来源")
- # 只取知识内容的前 2000 个字
- knowledge_snippet = knowledge[:2000] + "..." if len(knowledge) > 2000 else knowledge
- knowledge_info_str += (
- f"{i + 1}. 关于 '{query}' (来源: {source}): {knowledge_snippet}\n" # 格式微调,更简洁
- )
- else:
- knowledge_info_str += f"{i + 1}. 发现一条格式不正确的知识记录。\n"
-
- if not recent_knowledge:
- knowledge_info_str += "- 暂无。\n" # 更简洁的提示
-
- else:
- knowledge_info_str += "- 暂无。\n"
- except AttributeError:
- logger.warning(f"[私聊][{self.private_name}]ConversationInfo 对象可能缺少 knowledge_list 属性。")
- knowledge_info_str += "- 获取知识列表时出错。\n"
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]构建知识信息字符串时出错: {e}")
- knowledge_info_str += "- 处理知识列表时出错。\n"
-
- # 获取聊天历史记录 (chat_history_text)
- chat_history_text = observation_info.chat_history_str
- if observation_info.new_messages_count > 0 and observation_info.unprocessed_messages:
- new_messages_list = observation_info.unprocessed_messages
- new_messages_str = build_readable_messages(
- new_messages_list,
- replace_bot_name=True,
- merge_messages=False,
- timestamp_mode="relative",
- read_mark=0.0,
- )
- chat_history_text += f"\n--- 以下是 {observation_info.new_messages_count} 条新消息 ---\n{new_messages_str}"
- elif not chat_history_text:
- chat_history_text = "还没有聊天记录。"
-
- # 构建 Persona 文本 (persona_text)
- persona_text = f"你的名字是{self.name},{self.personality_info}。"
-
- # --- 选择 Prompt ---
- if action_type == "send_new_message":
- prompt_template = PROMPT_SEND_NEW_MESSAGE
- logger.info(f"[私聊][{self.private_name}]使用 PROMPT_SEND_NEW_MESSAGE (追问生成)")
- elif action_type == "say_goodbye": # 处理告别动作
- prompt_template = PROMPT_FAREWELL
- logger.info(f"[私聊][{self.private_name}]使用 PROMPT_FAREWELL (告别语生成)")
- else: # 默认使用 direct_reply 的 prompt (包括 'direct_reply' 或其他未明确处理的类型)
- prompt_template = PROMPT_DIRECT_REPLY
- logger.info(f"[私聊][{self.private_name}]使用 PROMPT_DIRECT_REPLY (首次/非连续回复生成)")
-
- # --- 格式化最终的 Prompt ---
- prompt = prompt_template.format(
- persona_text=persona_text,
- goals_str=goals_str,
- chat_history_text=chat_history_text,
- knowledge_info_str=knowledge_info_str,
- )
-
- # --- 调用 LLM 生成 ---
- logger.debug(f"[私聊][{self.private_name}]发送到LLM的生成提示词:\n------\n{prompt}\n------")
- try:
- content, _ = await self.llm.generate_response_async(prompt)
- logger.debug(f"[私聊][{self.private_name}]生成的回复: {content}")
- # 移除旧的检查新消息逻辑,这应该由 conversation 控制流处理
- return content
-
- except Exception as e:
- logger.error(f"[私聊][{self.private_name}]生成回复时出错: {e}")
- return "抱歉,我现在有点混乱,让我重新思考一下..."
-
- # check_reply 方法保持不变
- async def check_reply(
- self, reply: str, goal: str, chat_history: List[Dict[str, Any]], chat_history_str: str, retry_count: int = 0
- ) -> Tuple[bool, str, bool]:
- """检查回复是否合适
- (此方法逻辑保持不变)
- """
- return await self.reply_checker.check(reply, goal, chat_history, chat_history_str, retry_count)
diff --git a/src/experimental/PFC/waiter.py b/src/experimental/PFC/waiter.py
deleted file mode 100644
index 530a48a4..00000000
--- a/src/experimental/PFC/waiter.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from src.common.logger import get_logger
-from .chat_observer import ChatObserver
-from .conversation_info import ConversationInfo
-
-# from src.individuality.individuality get_individuality,Individuality # 不再需要
-from src.config.config import global_config
-import time
-import asyncio
-
-logger = get_logger("waiter")
-
-# --- 在这里设定你想要的超时时间(秒) ---
-# 例如: 120 秒 = 2 分钟
-DESIRED_TIMEOUT_SECONDS = 300
-
-
-class Waiter:
- """等待处理类"""
-
- def __init__(self, stream_id: str, private_name: str):
- self.chat_observer = ChatObserver.get_instance(stream_id, private_name)
- self.name = global_config.bot.nickname
- self.private_name = private_name
- # self.wait_accumulated_time = 0 # 不再需要累加计时
-
- async def wait(self, conversation_info: ConversationInfo) -> bool:
- """等待用户新消息或超时"""
- wait_start_time = time.time()
- logger.info(f"[私聊][{self.private_name}]进入常规等待状态 (超时: {DESIRED_TIMEOUT_SECONDS} 秒)...")
-
- while True:
- # 检查是否有新消息
- if self.chat_observer.new_message_after(wait_start_time):
- logger.info(f"[私聊][{self.private_name}]等待结束,收到新消息")
- return False # 返回 False 表示不是超时
-
- # 检查是否超时
- elapsed_time = time.time() - wait_start_time
- if elapsed_time > DESIRED_TIMEOUT_SECONDS:
- logger.info(f"[私聊][{self.private_name}]等待超过 {DESIRED_TIMEOUT_SECONDS} 秒...添加思考目标。")
- wait_goal = {
- "goal": f"你等待了{elapsed_time / 60:.1f}分钟,注意可能在对方看来聊天已经结束,思考接下来要做什么",
- "reasoning": "对方很久没有回复你的消息了",
- }
- conversation_info.goal_list.append(wait_goal)
- logger.info(f"[私聊][{self.private_name}]添加目标: {wait_goal}")
- return True # 返回 True 表示超时
-
- await asyncio.sleep(5) # 每 5 秒检查一次
- logger.debug(
- f"[私聊][{self.private_name}]等待中..."
- ) # 可以考虑把这个频繁日志注释掉,只在超时或收到消息时输出
-
- async def wait_listening(self, conversation_info: ConversationInfo) -> bool:
- """倾听用户发言或超时"""
- wait_start_time = time.time()
- logger.info(f"[私聊][{self.private_name}]进入倾听等待状态 (超时: {DESIRED_TIMEOUT_SECONDS} 秒)...")
-
- while True:
- # 检查是否有新消息
- if self.chat_observer.new_message_after(wait_start_time):
- logger.info(f"[私聊][{self.private_name}]倾听等待结束,收到新消息")
- return False # 返回 False 表示不是超时
-
- # 检查是否超时
- elapsed_time = time.time() - wait_start_time
- if elapsed_time > DESIRED_TIMEOUT_SECONDS:
- logger.info(f"[私聊][{self.private_name}]倾听等待超过 {DESIRED_TIMEOUT_SECONDS} 秒...添加思考目标。")
- wait_goal = {
- # 保持 goal 文本一致
- "goal": f"你等待了{elapsed_time / 60:.1f}分钟,对方似乎话说一半突然消失了,可能忙去了?也可能忘记了回复?要问问吗?还是结束对话?或继续等待?思考接下来要做什么",
- "reasoning": "对方话说一半消失了,很久没有回复",
- }
- conversation_info.goal_list.append(wait_goal)
- logger.info(f"[私聊][{self.private_name}]添加目标: {wait_goal}")
- return True # 返回 True 表示超时
-
- await asyncio.sleep(5) # 每 5 秒检查一次
- logger.debug(f"[私聊][{self.private_name}]倾听等待中...") # 同上,可以考虑注释掉
diff --git a/src/experimental/only_message_process.py b/src/experimental/only_message_process.py
deleted file mode 100644
index e5ca6b82..00000000
--- a/src/experimental/only_message_process.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from src.common.logger import get_logger
-from src.chat.message_receive.message import MessageRecv
-from src.chat.message_receive.storage import MessageStorage
-from src.config.config import global_config
-from src.chat.message_receive.chat_stream import ChatStream
-
-from maim_message import UserInfo
-from datetime import datetime
-import re
-
-logger = get_logger("pfc")
-
-
-class MessageProcessor:
- """消息处理器,负责处理接收到的消息并存储"""
-
- def __init__(self):
- self.storage = MessageStorage()
-
- @staticmethod
- def _check_ban_words(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
- """检查消息中是否包含过滤词"""
- for word in global_config.message_receive.ban_words:
- if word in text:
- logger.info(
- f"[{chat.group_info.group_name if chat.group_info else '私聊'}]{userinfo.user_nickname}:{text}"
- )
- logger.info(f"[过滤词识别]消息中含有{word},filtered")
- return True
- return False
-
- @staticmethod
- def _check_ban_regex(text: str, chat: ChatStream, userinfo: UserInfo) -> bool:
- """检查消息是否匹配过滤正则表达式"""
- for pattern in global_config.message_receive.ban_msgs_regex:
- if re.search(pattern, text):
- chat_name = chat.group_info.group_name if chat.group_info else "私聊"
- logger.info(f"[{chat_name}]{userinfo.user_nickname}:{text}")
- logger.info(f"[正则表达式过滤]消息匹配到{pattern},filtered")
- return True
- return False
-
- async def process_message(self, message: MessageRecv) -> None:
- """处理消息并存储
-
- Args:
- message: 消息对象
- """
- userinfo = message.message_info.user_info
- chat = message.chat_stream
-
- # 处理消息
- await message.process()
-
- # 过滤词/正则表达式过滤
- if self._check_ban_words(message.processed_plain_text, chat, userinfo) or self._check_ban_regex(
- message.raw_message, chat, userinfo
- ):
- return
-
- # 存储消息
- await self.storage.store_message(message, chat)
-
- # 打印消息信息
- mes_name = chat.group_info.group_name if chat.group_info else "私聊"
- # 将时间戳转换为datetime对象
- current_time = datetime.fromtimestamp(message.message_info.time).strftime("%H:%M:%S")
- logger.info(
- f"[{current_time}][{mes_name}]{message.message_info.user_info.user_nickname}: {message.processed_plain_text}"
- )
diff --git a/src/individuality/identity.py b/src/individuality/identity.py
deleted file mode 100644
index bb312598..00000000
--- a/src/individuality/identity.py
+++ /dev/null
@@ -1,30 +0,0 @@
-from dataclasses import dataclass
-from typing import List
-
-
-@dataclass
-class Identity:
- """身份特征类"""
-
- identity_detail: List[str] # 身份细节描述
-
- def __init__(self, identity_detail: List[str] = None):
- """初始化身份特征
-
- Args:
- identity_detail: 身份细节描述列表
- """
- if identity_detail is None:
- identity_detail = []
- self.identity_detail = identity_detail
-
- def to_dict(self) -> dict:
- """将身份特征转换为字典格式"""
- return {
- "identity_detail": self.identity_detail,
- }
-
- @classmethod
- def from_dict(cls, data: dict) -> "Identity":
- """从字典创建身份特征实例"""
- return cls(identity_detail=data.get("identity_detail", []))
diff --git a/src/individuality/individuality.py b/src/individuality/individuality.py
index 8365c088..fc7156e1 100644
--- a/src/individuality/individuality.py
+++ b/src/individuality/individuality.py
@@ -1,17 +1,13 @@
-from typing import Optional
-import ast
-
-from src.llm_models.utils_model import LLMRequest
-from .personality import Personality
-from .identity import Identity
-import random
import json
import os
import hashlib
-from rich.traceback import install
+import time
+
from src.common.logger import get_logger
-from src.person_info.person_info import get_person_info_manager
from src.config.config import global_config
+from src.llm_models.utils_model import LLMRequest
+from src.person_info.person_info import get_person_info_manager
+from rich.traceback import install
install(extra_lines=3)
@@ -22,300 +18,88 @@ class Individuality:
"""个体特征管理类"""
def __init__(self):
- # 正常初始化实例属性
- self.personality: Optional[Personality] = None
- self.identity: Optional[Identity] = None
-
self.name = ""
self.bot_person_id = ""
self.meta_info_file_path = "data/personality/meta.json"
+ self.personality_data_file_path = "data/personality/personality_data.json"
self.model = LLMRequest(
model=global_config.model.utils,
request_type="individuality.compress",
)
- async def initialize(
- self,
- bot_nickname: str,
- personality_core: str,
- personality_sides: list,
- identity_detail: list,
- ) -> None:
- """初始化个体特征
+ async def initialize(self) -> None:
+ """初始化个体特征"""
+ bot_nickname = global_config.bot.nickname
+ personality_core = global_config.personality.personality_core
+ personality_side = global_config.personality.personality_side
+ identity = global_config.personality.identity
+
- Args:
- bot_nickname: 机器人昵称
- personality_core: 人格核心特点
- personality_sides: 人格侧面描述
- identity_detail: 身份细节描述
- """
- logger.info("正在初始化个体特征")
person_info_manager = get_person_info_manager()
self.bot_person_id = person_info_manager.get_person_id("system", "bot_id")
self.name = bot_nickname
# 检查配置变化,如果变化则清空
personality_changed, identity_changed = await self._check_config_and_clear_if_changed(
- bot_nickname, personality_core, personality_sides, identity_detail
+ bot_nickname, personality_core, personality_side, identity
)
- # 初始化人格
- self.personality = Personality.initialize(
- bot_nickname=bot_nickname, personality_core=personality_core, personality_sides=personality_sides
- )
+ logger.info("正在构建人设信息")
- # 初始化身份
- self.identity = Identity(identity_detail=identity_detail)
+ # 如果配置有变化,重新生成压缩版本
+ if personality_changed or identity_changed:
+ logger.info("检测到配置变化,重新生成压缩版本")
+ personality_result = await self._create_personality(personality_core, personality_side)
+ identity_result = await self._create_identity(identity)
+ else:
+ logger.info("配置未变化,使用缓存版本")
+ # 从文件中获取已有的结果
+ personality_result, identity_result = self._get_personality_from_file()
+ if not personality_result or not identity_result:
+ logger.info("未找到有效缓存,重新生成")
+ personality_result = await self._create_personality(personality_core, personality_side)
+ identity_result = await self._create_identity(identity)
- logger.info("正在将所有人设写入impression")
- # 将所有人设写入impression
- impression_parts = []
- if personality_core:
- impression_parts.append(f"核心人格: {personality_core}")
- if personality_sides:
- impression_parts.append(f"人格侧面: {'、'.join(personality_sides)}")
- if identity_detail:
- impression_parts.append(f"身份: {'、'.join(identity_detail)}")
- logger.info(f"impression_parts: {impression_parts}")
+ # 保存到文件
+ if personality_result and identity_result:
+ self._save_personality_to_file(personality_result, identity_result)
+ logger.info("已将人设构建并保存到文件")
+ else:
+ logger.error("人设构建失败")
- impression_text = "。".join(impression_parts)
- if impression_text:
- impression_text += "。"
-
- if impression_text:
+ # 如果任何一个发生变化,都需要清空数据库中的info_list(因为这影响整体人设)
+ if personality_changed or identity_changed:
+ logger.info("将清空数据库中原有的关键词缓存")
update_data = {
"platform": "system",
"user_id": "bot_id",
"person_name": self.name,
"nickname": self.name,
}
+ await person_info_manager.update_one_field(self.bot_person_id, "info_list", [], data=update_data)
- await person_info_manager.update_one_field(
- self.bot_person_id, "impression", impression_text, data=update_data
- )
- logger.debug("已将完整人设更新到bot的impression中")
-
- # 根据变化情况决定是否重新创建
- personality_result = None
- identity_result = None
-
- if personality_changed:
- logger.info("检测到人格配置变化,重新生成压缩版本")
- personality_result = await self._create_personality(personality_core, personality_sides)
+ async def get_personality_block(self) -> str:
+ bot_name = global_config.bot.nickname
+ if global_config.bot.alias_names:
+ bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
else:
- logger.info("人格配置未变化,使用缓存版本")
- # 从缓存中获取已有的personality结果
- existing_short_impression = await person_info_manager.get_value(self.bot_person_id, "short_impression")
- if existing_short_impression:
- try:
- existing_data = ast.literal_eval(existing_short_impression)
- if isinstance(existing_data, list) and len(existing_data) >= 1:
- personality_result = existing_data[0]
- except (json.JSONDecodeError, TypeError, IndexError):
- logger.warning("无法解析现有的short_impression,将重新生成人格部分")
- personality_result = await self._create_personality(personality_core, personality_sides)
- else:
- logger.info("未找到现有的人格缓存,重新生成")
- personality_result = await self._create_personality(personality_core, personality_sides)
-
- if identity_changed:
- logger.info("检测到身份配置变化,重新生成压缩版本")
- identity_result = await self._create_identity(identity_detail)
- else:
- logger.info("身份配置未变化,使用缓存版本")
- # 从缓存中获取已有的identity结果
- existing_short_impression = await person_info_manager.get_value(self.bot_person_id, "short_impression")
- if existing_short_impression:
- try:
- existing_data = ast.literal_eval(existing_short_impression)
- if isinstance(existing_data, list) and len(existing_data) >= 2:
- identity_result = existing_data[1]
- except (json.JSONDecodeError, TypeError, IndexError):
- logger.warning("无法解析现有的short_impression,将重新生成身份部分")
- identity_result = await self._create_identity(identity_detail)
- else:
- logger.info("未找到现有的身份缓存,重新生成")
- identity_result = await self._create_identity(identity_detail)
-
- result = [personality_result, identity_result]
-
- # 更新short_impression字段
- if personality_result and identity_result:
- person_info_manager = get_person_info_manager()
- await person_info_manager.update_one_field(self.bot_person_id, "short_impression", result)
- logger.info("已将人设构建")
- else:
- logger.error("人设构建失败")
-
- def to_dict(self) -> dict:
- """将个体特征转换为字典格式"""
- return {
- "personality": self.personality.to_dict() if self.personality else None,
- "identity": self.identity.to_dict() if self.identity else None,
- }
-
- @classmethod
- def from_dict(cls, data: dict) -> "Individuality":
- """从字典创建个体特征实例"""
- instance = cls()
- if data.get("personality"):
- instance.personality = Personality.from_dict(data["personality"])
- if data.get("identity"):
- instance.identity = Identity.from_dict(data["identity"])
- return instance
-
- def get_personality_prompt(self, level: int, x_person: int = 2) -> str:
- """
- 获取人格特征的prompt
-
- Args:
- level (int): 详细程度 (1: 核心, 2: 核心+随机侧面, 3: 核心+所有侧面)
- x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
-
- Returns:
- str: 生成的人格prompt字符串
- """
- if x_person not in [0, 1, 2]:
- return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
- if not self.personality:
- return "人格特征尚未初始化。"
-
- if x_person == 2:
- p_pronoun = "你"
- prompt_personality = f"{p_pronoun}{self.personality.personality_core}"
- elif x_person == 1:
- p_pronoun = "我"
- prompt_personality = f"{p_pronoun}{self.personality.personality_core}"
- else: # x_person == 0
- # 对于无人称,直接描述核心特征
- prompt_personality = f"{self.personality.personality_core}"
-
- # 根据level添加人格侧面
- if level >= 2 and self.personality.personality_sides:
- personality_sides = list(self.personality.personality_sides)
- random.shuffle(personality_sides)
- if level == 2:
- prompt_personality += f",有时也会{personality_sides[0]}"
- elif level == 3:
- sides_str = "、".join(personality_sides)
- prompt_personality += f",有时也会{sides_str}"
- prompt_personality += "。"
- return prompt_personality
-
- def get_identity_prompt(self, level: int, x_person: int = 2) -> str:
- """
- 获取身份特征的prompt
-
- Args:
- level (int): 详细程度 (1: 随机细节, 2: 所有细节, 3: 同2)
- x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
-
- Returns:
- str: 生成的身份prompt字符串
- """
- if x_person not in [0, 1, 2]:
- return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
- if not self.identity:
- return "身份特征尚未初始化。"
-
- if x_person == 2:
- i_pronoun = "你"
- elif x_person == 1:
- i_pronoun = "我"
- else: # x_person == 0
- i_pronoun = "" # 无人称
-
- identity_parts = []
-
- # 根据level添加身份细节
- if level >= 1 and self.identity.identity_detail:
- identity_detail = list(self.identity.identity_detail)
- random.shuffle(identity_detail)
- if level == 1:
- identity_parts.append(f"{identity_detail[0]}")
- elif level >= 2:
- details_str = "、".join(identity_detail)
- identity_parts.append(f"{details_str}")
-
- if identity_parts:
- details_str = ",".join(identity_parts)
- if x_person in [1, 2]:
- return f"{i_pronoun},{details_str}。"
- else: # x_person == 0
- # 无人称时,直接返回细节,不加代词和开头的逗号
- return f"{details_str}。"
- else:
- if x_person in [1, 2]:
- return f"{i_pronoun}的身份信息不完整。"
- else: # x_person == 0
- return "身份信息不完整。"
-
- def get_prompt(self, level: int, x_person: int = 2) -> str:
- """
- 获取合并的个体特征prompt
-
- Args:
- level (int): 详细程度 (1: 核心/随机细节, 2: 核心+随机侧面/全部细节, 3: 全部)
- x_person (int, optional): 人称代词 (0: 无人称, 1: 我, 2: 你). 默认为 2.
-
- Returns:
- str: 生成的合并prompt字符串
- """
- if x_person not in [0, 1, 2]:
- return "无效的人称代词,请使用 0 (无人称), 1 (我) 或 2 (你)。"
-
- if not self.personality or not self.identity:
- return "个体特征尚未完全初始化。"
-
- # 调用新的独立方法
- prompt_personality = self.get_personality_prompt(level, x_person)
- prompt_identity = self.get_identity_prompt(level, x_person)
-
- # 移除可能存在的错误信息,只合并有效的 prompt
- valid_prompts = []
- if "尚未初始化" not in prompt_personality and "无效的人称" not in prompt_personality:
- valid_prompts.append(prompt_personality)
- if (
- "尚未初始化" not in prompt_identity
- and "无效的人称" not in prompt_identity
- and "信息不完整" not in prompt_identity
- ):
- # 从身份 prompt 中移除代词和句号,以便更好地合并
- identity_content = prompt_identity
- if x_person == 2 and identity_content.startswith("你,"):
- identity_content = identity_content[2:]
- elif x_person == 1 and identity_content.startswith("我,"):
- identity_content = identity_content[2:]
- # 对于 x_person == 0,身份提示不带前缀,无需移除
-
- if identity_content.endswith("。"):
- identity_content = identity_content[:-1]
- valid_prompts.append(identity_content)
-
- # --- 合并 Prompt ---
- final_prompt = " ".join(valid_prompts)
-
- return final_prompt.strip()
-
- def get_traits(self, factor):
- """
- 获取个体特征的特质
- """
- if factor == "openness":
- return self.personality.openness
- elif factor == "conscientiousness":
- return self.personality.conscientiousness
- elif factor == "extraversion":
- return self.personality.extraversion
- elif factor == "agreeableness":
- return self.personality.agreeableness
- elif factor == "neuroticism":
- return self.personality.neuroticism
- return None
+ bot_nickname = ""
+
+ # 从文件获取 short_impression
+ personality, identity = self._get_personality_from_file()
+
+ # 确保short_impression是列表格式且有足够的元素
+ if not personality or not identity:
+ logger.warning(f"personality或identity为空: {personality}, {identity}, 使用默认值")
+ personality = "友好活泼"
+ identity = "人类"
+
+ prompt_personality = f"{personality}\n{identity}"
+ return f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}"
def _get_config_hash(
- self, bot_nickname: str, personality_core: str, personality_sides: list, identity_detail: list
+ self, bot_nickname: str, personality_core: str, personality_side: str, identity: str
) -> tuple[str, str]:
"""获取personality和identity配置的哈希值
@@ -326,7 +110,7 @@ class Individuality:
personality_config = {
"nickname": bot_nickname,
"personality_core": personality_core,
- "personality_sides": sorted(personality_sides),
+ "personality_side": personality_side,
"compress_personality": global_config.personality.compress_personality,
}
personality_str = json.dumps(personality_config, sort_keys=True)
@@ -334,8 +118,8 @@ class Individuality:
# 身份配置哈希
identity_config = {
- "identity_detail": sorted(identity_detail),
- "compress_identity": global_config.identity.compress_indentity,
+ "identity": identity,
+ "compress_identity": global_config.personality.compress_identity,
}
identity_str = json.dumps(identity_config, sort_keys=True)
identity_hash = hashlib.md5(identity_str.encode("utf-8")).hexdigest()
@@ -343,7 +127,7 @@ class Individuality:
return personality_hash, identity_hash
async def _check_config_and_clear_if_changed(
- self, bot_nickname: str, personality_core: str, personality_sides: list, identity_detail: list
+ self, bot_nickname: str, personality_core: str, personality_side: str, identity: str
) -> tuple[bool, bool]:
"""检查配置是否发生变化,如果变化则清空相应缓存
@@ -352,7 +136,7 @@ class Individuality:
"""
person_info_manager = get_person_info_manager()
current_personality_hash, current_identity_hash = self._get_config_hash(
- bot_nickname, personality_core, personality_sides, identity_detail
+ bot_nickname, personality_core, personality_side, identity
)
meta_info = self._load_meta_info()
@@ -408,53 +192,60 @@ class Individuality:
except IOError as e:
logger.error(f"保存meta_info文件失败: {e}")
- async def get_keyword_info(self, keyword: str) -> str:
- """获取指定关键词的信息
+ def _load_personality_data(self) -> dict:
+ """从JSON文件中加载personality数据"""
+ if os.path.exists(self.personality_data_file_path):
+ try:
+ with open(self.personality_data_file_path, "r", encoding="utf-8") as f:
+ return json.load(f)
+ except (json.JSONDecodeError, IOError) as e:
+ logger.error(f"读取personality_data文件失败: {e}, 将创建新文件。")
+ return {}
+ return {}
- Args:
- keyword: 关键词
+ def _save_personality_data(self, personality_data: dict):
+ """将personality数据保存到JSON文件"""
+ try:
+ os.makedirs(os.path.dirname(self.personality_data_file_path), exist_ok=True)
+ with open(self.personality_data_file_path, "w", encoding="utf-8") as f:
+ json.dump(personality_data, f, ensure_ascii=False, indent=2)
+ logger.debug(f"已保存personality数据到文件: {self.personality_data_file_path}")
+ except IOError as e:
+ logger.error(f"保存personality_data文件失败: {e}")
+ def _get_personality_from_file(self) -> tuple[str, str]:
+ """从文件获取personality数据
+
Returns:
- str: 随机选择的一条信息,如果没有则返回空字符串
+ tuple: (personality, identity)
"""
- person_info_manager = get_person_info_manager()
- info_list_json = await person_info_manager.get_value(self.bot_person_id, "info_list")
- if info_list_json:
- try:
- # get_value might return a pre-deserialized list if it comes from a cache,
- # or a JSON string if it comes from DB.
- info_list = json.loads(info_list_json) if isinstance(info_list_json, str) else info_list_json
+ personality_data = self._load_personality_data()
+ personality = personality_data.get("personality", "友好活泼")
+ identity = personality_data.get("identity", "人类")
+ return personality, identity
- for item in info_list:
- if isinstance(item, dict) and item.get("info_type") == keyword:
- return item.get("info_content", "")
- except (json.JSONDecodeError, TypeError):
- logger.error(f"解析info_list失败: {info_list_json}")
- return ""
- return ""
+ def _save_personality_to_file(self, personality: str, identity: str):
+ """保存personality数据到文件
+
+ Args:
+ personality: 压缩后的人格描述
+ identity: 压缩后的身份描述
+ """
+ personality_data = {
+ "personality": personality,
+ "identity": identity,
+ "bot_nickname": self.name,
+ "last_updated": int(time.time())
+ }
+ self._save_personality_data(personality_data)
- async def get_all_keywords(self) -> list:
- """获取所有已缓存的关键词列表"""
- person_info_manager = get_person_info_manager()
- info_list_json = await person_info_manager.get_value(self.bot_person_id, "info_list")
- keywords = []
- if info_list_json:
- try:
- info_list = json.loads(info_list_json) if isinstance(info_list_json, str) else info_list_json
- for item in info_list:
- if isinstance(item, dict) and "info_type" in item:
- keywords.append(item["info_type"])
- except (json.JSONDecodeError, TypeError):
- logger.error(f"解析info_list失败: {info_list_json}")
- return keywords
-
- async def _create_personality(self, personality_core: str, personality_sides: list) -> str:
+ async def _create_personality(self, personality_core: str, personality_side: str) -> str:
+ # sourcery skip: merge-list-append, move-assign
"""使用LLM创建压缩版本的impression
Args:
personality_core: 核心人格
- personality_sides: 人格侧面列表
- identity_detail: 身份细节列表
+ personality_side: 人格侧面列表
Returns:
str: 压缩后的impression文本
@@ -468,11 +259,9 @@ class Individuality:
# 准备需要压缩的内容
if global_config.personality.compress_personality:
- personality_to_compress = []
- if personality_sides:
- personality_to_compress.append(f"人格特质: {'、'.join(personality_sides)}")
+ personality_to_compress = f"人格特质: {personality_side}"
- prompt = f"""请将以下人格信息进行简洁压缩,保留主要内容,用简练的中文表达:
+ prompt = f"""请将以下人格信息进行简洁压缩,保留主要内容,用简练的中文表达:
{personality_to_compress}
要求:
@@ -480,34 +269,32 @@ class Individuality:
2. 尽量简洁,不超过30字
3. 直接输出压缩后的内容,不要解释"""
- response, (_, _) = await self.model.generate_response_async(
- prompt=prompt,
- )
+ response, (_, _) = await self.model.generate_response_async(
+ prompt=prompt,
+ )
- if response.strip():
- personality_parts.append(response.strip())
- logger.info(f"精简人格侧面: {response.strip()}")
- else:
- logger.error(f"使用LLM压缩人设时出错: {response}")
- if personality_parts:
- personality_result = "。".join(personality_parts)
- else:
- personality_result = personality_core
+ if response.strip():
+ personality_parts.append(response.strip())
+ logger.info(f"精简人格侧面: {response.strip()}")
+ else:
+ logger.error(f"使用LLM压缩人设时出错: {response}")
+ if personality_parts:
+ personality_result = "。".join(personality_parts)
+ else:
+ personality_result = personality_core
else:
personality_result = personality_core
- if personality_sides:
- personality_result += ",".join(personality_sides)
+ if personality_side:
+ personality_result += f",{personality_side}"
return personality_result
- async def _create_identity(self, identity_detail: list) -> str:
+ async def _create_identity(self, identity: str) -> str:
"""使用LLM创建压缩版本的impression"""
logger.info("正在构建身份.........")
- if global_config.identity.compress_indentity:
- identity_to_compress = []
- if identity_detail:
- identity_to_compress.append(f"身份背景: {'、'.join(identity_detail)}")
+ if global_config.personality.compress_identity:
+ identity_to_compress = f"身份背景: {identity}"
prompt = f"""请将以下身份信息进行简洁压缩,保留主要内容,用简练的中文表达:
{identity_to_compress}
@@ -527,7 +314,7 @@ class Individuality:
else:
logger.error(f"使用LLM压缩身份时出错: {response}")
else:
- identity_result = "。".join(identity_detail)
+ identity_result = "。".join(identity)
return identity_result
diff --git a/src/individuality/not_using/offline_llm.py b/src/individuality/not_using/offline_llm.py
index 83cb263c..2bafb69a 100644
--- a/src/individuality/not_using/offline_llm.py
+++ b/src/individuality/not_using/offline_llm.py
@@ -39,7 +39,7 @@ class LLMRequestOff:
}
# 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
+ api_url = f"{self.base_url.rstrip('/')}/chat/completions" # type: ignore
logger.info(f"Request URL: {api_url}") # 记录请求的 URL
max_retries = 3
@@ -89,7 +89,7 @@ class LLMRequestOff:
}
# 发送请求到完整的 chat/completions 端点
- api_url = f"{self.base_url.rstrip('/')}/chat/completions"
+ api_url = f"{self.base_url.rstrip('/')}/chat/completions" # type: ignore
logger.info(f"Request URL: {api_url}") # 记录请求的 URL
max_retries = 3
diff --git a/src/individuality/not_using/per_bf_gen.py b/src/individuality/not_using/per_bf_gen.py
index 2d0961cb..aedbe00e 100644
--- a/src/individuality/not_using/per_bf_gen.py
+++ b/src/individuality/not_using/per_bf_gen.py
@@ -33,10 +33,10 @@ else:
def adapt_scene(scene: str) -> str:
personality_core = config["personality"]["personality_core"]
- personality_sides = config["personality"]["personality_sides"]
- personality_side = random.choice(personality_sides)
- identity_details = config["identity"]["identity_detail"]
- identity_detail = random.choice(identity_details)
+ personality_side = config["personality"]["personality_side"]
+ personality_side = random.choice(personality_side)
+ identitys = config["identity"]["identity"]
+ identity = random.choice(identitys)
"""
根据config中的属性,改编场景使其更适合当前角色
@@ -56,7 +56,7 @@ def adapt_scene(scene: str) -> str:
- 外貌: {config["identity"]["appearance"]}
- 性格核心: {personality_core}
- 性格侧面: {personality_side}
-- 身份细节: {identity_detail}
+- 身份细节: {identity}
请根据上述形象,改编以下场景,在测评中,用户将根据该场景给出上述角色形象的反应:
{scene}
@@ -83,8 +83,8 @@ class PersonalityEvaluatorDirect:
def __init__(self):
self.personality_traits = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
self.scenarios = []
- self.final_scores = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
- self.dimension_counts = {trait: 0 for trait in self.final_scores.keys()}
+ self.final_scores: Dict[str, float] = {"开放性": 0, "严谨性": 0, "外向性": 0, "宜人性": 0, "神经质": 0}
+ self.dimension_counts = {trait: 0 for trait in self.final_scores}
# 为每个人格特质获取对应的场景
for trait in PERSONALITY_SCENES:
@@ -119,8 +119,7 @@ class PersonalityEvaluatorDirect:
# 构建维度描述
dimension_descriptions = []
for dim in dimensions:
- desc = FACTOR_DESCRIPTIONS.get(dim, "")
- if desc:
+ if desc := FACTOR_DESCRIPTIONS.get(dim, ""):
dimension_descriptions.append(f"- {dim}:{desc}")
dimensions_text = "\n".join(dimension_descriptions)
@@ -180,8 +179,8 @@ class PersonalityEvaluatorDirect:
print("\n角色基本信息:")
print(f"- 昵称:{config['bot']['nickname']}")
print(f"- 性格核心:{config['personality']['personality_core']}")
- print(f"- 性格侧面:{config['personality']['personality_sides']}")
- print(f"- 身份细节:{config['identity']['identity_detail']}")
+ print(f"- 性格侧面:{config['personality']['personality_side']}")
+ print(f"- 身份细节:{config['identity']['identity']}")
print("\n准备好了吗?按回车键开始...")
input()
@@ -262,8 +261,8 @@ class PersonalityEvaluatorDirect:
"weight": config["identity"]["weight"],
"appearance": config["identity"]["appearance"],
"personality_core": config["personality"]["personality_core"],
- "personality_sides": config["personality"]["personality_sides"],
- "identity_detail": config["identity"]["identity_detail"],
+ "personality_side": config["personality"]["personality_side"],
+ "identity": config["identity"]["identity"],
},
}
diff --git a/src/individuality/template_scene.json b/src/individuality/not_using/template_scene.json
similarity index 100%
rename from src/individuality/template_scene.json
rename to src/individuality/not_using/template_scene.json
diff --git a/src/individuality/personality.py b/src/individuality/personality.py
deleted file mode 100644
index 0ee46a3d..00000000
--- a/src/individuality/personality.py
+++ /dev/null
@@ -1,120 +0,0 @@
-from dataclasses import dataclass
-from typing import Dict, List
-import json
-from pathlib import Path
-
-
-@dataclass
-class Personality:
- """人格特质类"""
-
- openness: float # 开放性
- conscientiousness: float # 尽责性
- extraversion: float # 外向性
- agreeableness: float # 宜人性
- neuroticism: float # 神经质
- bot_nickname: str # 机器人昵称
- personality_core: str # 人格核心特点
- personality_sides: List[str] # 人格侧面描述
-
- _instance = None
-
- def __new__(cls, *args, **kwargs):
- if cls._instance is None:
- cls._instance = super().__new__(cls)
- return cls._instance
-
- def __init__(self, personality_core: str = "", personality_sides: List[str] = None):
- if personality_sides is None:
- personality_sides = []
- self.personality_core = personality_core
- self.personality_sides = personality_sides
-
- @classmethod
- def get_instance(cls) -> "Personality":
- """获取Personality单例实例
-
- Returns:
- Personality: 单例实例
- """
- if cls._instance is None:
- cls._instance = cls()
- return cls._instance
-
- def _init_big_five_personality(self):
- """初始化大五人格特质"""
- # 构建文件路径
- personality_file = Path("data/personality") / f"{self.bot_nickname}_personality.per"
-
- # 如果文件存在,读取文件
- if personality_file.exists():
- with open(personality_file, "r", encoding="utf-8") as f:
- personality_data = json.load(f)
- self.openness = personality_data.get("openness", 0.5)
- self.conscientiousness = personality_data.get("conscientiousness", 0.5)
- self.extraversion = personality_data.get("extraversion", 0.5)
- self.agreeableness = personality_data.get("agreeableness", 0.5)
- self.neuroticism = personality_data.get("neuroticism", 0.5)
- else:
- # 如果文件不存在,根据personality_core和personality_core来设置大五人格特质
- if "活泼" in self.personality_core or "开朗" in self.personality_sides:
- self.extraversion = 0.8
- self.neuroticism = 0.2
- else:
- self.extraversion = 0.3
- self.neuroticism = 0.5
-
- if "认真" in self.personality_core or "负责" in self.personality_sides:
- self.conscientiousness = 0.9
- else:
- self.conscientiousness = 0.5
-
- if "友善" in self.personality_core or "温柔" in self.personality_sides:
- self.agreeableness = 0.9
- else:
- self.agreeableness = 0.5
-
- if "创新" in self.personality_core or "开放" in self.personality_sides:
- self.openness = 0.8
- else:
- self.openness = 0.5
-
- @classmethod
- def initialize(cls, bot_nickname: str, personality_core: str, personality_sides: List[str]) -> "Personality":
- """初始化人格特质
-
- Args:
- bot_nickname: 机器人昵称
- personality_core: 人格核心特点
- personality_sides: 人格侧面描述
-
- Returns:
- Personality: 初始化后的人格特质实例
- """
- instance = cls.get_instance()
- instance.bot_nickname = bot_nickname
- instance.personality_core = personality_core
- instance.personality_sides = personality_sides
- instance._init_big_five_personality()
- return instance
-
- def to_dict(self) -> Dict:
- """将人格特质转换为字典格式"""
- return {
- "openness": self.openness,
- "conscientiousness": self.conscientiousness,
- "extraversion": self.extraversion,
- "agreeableness": self.agreeableness,
- "neuroticism": self.neuroticism,
- "bot_nickname": self.bot_nickname,
- "personality_core": self.personality_core,
- "personality_sides": self.personality_sides,
- }
-
- @classmethod
- def from_dict(cls, data: Dict) -> "Personality":
- """从字典创建人格特质实例"""
- instance = cls.get_instance()
- for key, value in data.items():
- setattr(instance, key, value)
- return instance
diff --git a/src/llm_models/utils_model.py b/src/llm_models/utils_model.py
index 1077cfa0..3621b450 100644
--- a/src/llm_models/utils_model.py
+++ b/src/llm_models/utils_model.py
@@ -2,7 +2,7 @@ import asyncio
import json
import re
from datetime import datetime
-from typing import Tuple, Union, Dict, Any
+from typing import Tuple, Union, Dict, Any, Callable
import aiohttp
from aiohttp.client import ClientResponse
from src.common.logger import get_logger
@@ -216,6 +216,8 @@ class LLMRequest:
prompt: str = None,
image_base64: str = None,
image_format: str = None,
+ file_bytes: bytes = None,
+ file_format: str = None,
payload: dict = None,
retry_policy: dict = None,
) -> Dict[str, Any]:
@@ -225,6 +227,8 @@ class LLMRequest:
prompt: prompt文本
image_base64: 图片的base64编码
image_format: 图片格式
+ file_bytes: 文件的二进制数据
+ file_format: 文件格式
payload: 请求体数据
retry_policy: 自定义重试策略
request_type: 请求类型
@@ -246,30 +250,33 @@ class LLMRequest:
# 构建请求体
if image_base64:
payload = await self._build_payload(prompt, image_base64, image_format)
+ elif file_bytes:
+ payload = await self._build_formdata_payload(file_bytes, file_format)
elif payload is None:
payload = await self._build_payload(prompt)
- if stream_mode:
- payload["stream"] = stream_mode
+ if not file_bytes:
+ if stream_mode:
+ payload["stream"] = stream_mode
- if self.temp != 0.7:
- payload["temperature"] = self.temp
+ if self.temp != 0.7:
+ payload["temperature"] = self.temp
- # 添加enable_thinking参数(如果不是默认值False)
- if not self.enable_thinking:
- payload["enable_thinking"] = False
+ # 添加enable_thinking参数(如果不是默认值False)
+ if not self.enable_thinking:
+ payload["enable_thinking"] = False
- if self.thinking_budget != 4096:
- payload["thinking_budget"] = self.thinking_budget
+ if self.thinking_budget != 4096:
+ payload["thinking_budget"] = self.thinking_budget
- if self.max_tokens:
- payload["max_tokens"] = self.max_tokens
+ if self.max_tokens:
+ payload["max_tokens"] = self.max_tokens
- # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
- # payload["max_tokens"] = global_config.model.model_max_output_length
- # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
- if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
- payload["max_completion_tokens"] = payload.pop("max_tokens")
+ # if "max_tokens" not in payload and "max_completion_tokens" not in payload:
+ # payload["max_tokens"] = global_config.model.model_max_output_length
+ # 如果 payload 中依然存在 max_tokens 且需要转换,在这里进行再次检查
+ if self.model_name.lower() in self.MODELS_NEEDING_TRANSFORMATION and "max_tokens" in payload:
+ payload["max_completion_tokens"] = payload.pop("max_tokens")
return {
"policy": policy,
@@ -278,6 +285,8 @@ class LLMRequest:
"stream_mode": stream_mode,
"image_base64": image_base64, # 保留必要的exception处理所需的原始数据
"image_format": image_format,
+ "file_bytes": file_bytes,
+ "file_format": file_format,
"prompt": prompt,
}
@@ -287,9 +296,11 @@ class LLMRequest:
prompt: str = None,
image_base64: str = None,
image_format: str = None,
+ file_bytes: bytes = None,
+ file_format: str = None,
payload: dict = None,
retry_policy: dict = None,
- response_handler: callable = None,
+ response_handler: Callable = None,
user_id: str = "system",
request_type: str = None,
):
@@ -299,6 +310,8 @@ class LLMRequest:
prompt: prompt文本
image_base64: 图片的base64编码
image_format: 图片格式
+ file_bytes: 文件的二进制数据
+ file_format: 文件格式
payload: 请求体数据
retry_policy: 自定义重试策略
response_handler: 自定义响应处理器
@@ -307,25 +320,34 @@ class LLMRequest:
"""
# 获取请求配置
request_content = await self._prepare_request(
- endpoint, prompt, image_base64, image_format, payload, retry_policy
+ endpoint, prompt, image_base64, image_format, file_bytes, file_format, payload, retry_policy
)
if request_type is None:
request_type = self.request_type
for retry in range(request_content["policy"]["max_retries"]):
try:
# 使用上下文管理器处理会话
- headers = await self._build_headers()
+ if file_bytes:
+ headers = await self._build_headers(is_formdata=True)
+ else:
+ headers = await self._build_headers(is_formdata=False)
# 似乎是openai流式必须要的东西,不过阿里云的qwq-plus加了这个没有影响
if request_content["stream_mode"]:
headers["Accept"] = "text/event-stream"
async with aiohttp.ClientSession(connector=await get_tcp_connector()) as session:
- async with session.post(
- request_content["api_url"], headers=headers, json=request_content["payload"]
- ) as response:
+ post_kwargs = {"headers": headers}
+ # form-data数据上传方式不同
+ if file_bytes:
+ post_kwargs["data"] = request_content["payload"]
+ else:
+ post_kwargs["json"] = request_content["payload"]
+
+ async with session.post(request_content["api_url"], **post_kwargs) as response:
handled_result = await self._handle_response(
response, request_content, retry, response_handler, user_id, request_type, endpoint
)
return handled_result
+
except Exception as e:
handled_payload, count_delta = await self._handle_exception(e, retry, request_content)
retry += count_delta # 降级不计入重试次数
@@ -342,11 +364,11 @@ class LLMRequest:
response: ClientResponse,
request_content: Dict[str, Any],
retry_count: int,
- response_handler: callable,
+ response_handler: Callable,
user_id,
request_type,
endpoint,
- ) -> Union[Dict[str, Any], None]:
+ ):
policy = request_content["policy"]
stream_mode = request_content["stream_mode"]
if response.status in policy["retry_codes"] or response.status in policy["abort_codes"]:
@@ -453,9 +475,7 @@ class LLMRequest:
}
return result
- async def _handle_error_response(
- self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]
- ) -> Union[Dict[str, any]]:
+ async def _handle_error_response(self, response: ClientResponse, retry_count: int, policy: Dict[str, Any]):
if response.status in policy["retry_codes"]:
wait_time = policy["base_wait"] * (2**retry_count)
logger.warning(f"模型 {self.model_name} 错误码: {response.status}, 等待 {wait_time}秒后重试")
@@ -605,7 +625,9 @@ class LLMRequest:
)
# 安全地检查和记录请求详情
handled_payload = await _safely_record(request_content, payload)
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload[:100]}")
+ logger.critical(
+ f"请求头: {await self._build_headers(no_key=True)} 请求体: {str(handled_payload)[:100]}"
+ )
raise RuntimeError(
f"模型 {self.model_name} API请求失败: 状态码 {exception.status}, {exception.message}"
)
@@ -619,7 +641,9 @@ class LLMRequest:
logger.critical(f"模型 {self.model_name} 请求失败: {str(exception)}")
# 安全地检查和记录请求详情
handled_payload = await _safely_record(request_content, payload)
- logger.critical(f"请求头: {await self._build_headers(no_key=True)} 请求体: {handled_payload[:100]}")
+ logger.critical(
+ f"请求头: {await self._build_headers(no_key=True)} 请求体: {str(handled_payload)[:100]}"
+ )
raise RuntimeError(f"模型 {self.model_name} API请求失败: {str(exception)}")
async def _transform_parameters(self, params: dict) -> dict:
@@ -640,6 +664,32 @@ class LLMRequest:
new_params["max_completion_tokens"] = new_params.pop("max_tokens")
return new_params
+ async def _build_formdata_payload(self, file_bytes: bytes, file_format: str) -> aiohttp.FormData:
+ """构建form-data请求体"""
+ # 目前只适配了音频文件
+ # 如果后续要支持其他类型的文件,可以在这里添加更多的处理逻辑
+ data = aiohttp.FormData()
+ content_type_list = {
+ "wav": "audio/wav",
+ "mp3": "audio/mpeg",
+ "ogg": "audio/ogg",
+ "flac": "audio/flac",
+ "aac": "audio/aac",
+ }
+
+ content_type = content_type_list.get(file_format)
+ if not content_type:
+ logger.warning(f"暂不支持的文件类型: {file_format}")
+
+ data.add_field(
+ "file",
+ io.BytesIO(file_bytes),
+ filename=f"file.{file_format}",
+ content_type=f"{content_type}", # 根据实际文件类型设置
+ )
+ data.add_field("model", self.model_name)
+ return data
+
async def _build_payload(self, prompt: str, image_base64: str = None, image_format: str = None) -> dict:
"""构建请求体"""
# 复制一份参数,避免直接修改 self.params
@@ -725,7 +775,8 @@ class LLMRequest:
return content, reasoning_content, tool_calls
else:
return content, reasoning_content
-
+ elif "text" in result and result["text"]:
+ return result["text"]
return "没有返回结果", ""
@staticmethod
@@ -739,11 +790,15 @@ class LLMRequest:
reasoning = ""
return content, reasoning
- async def _build_headers(self, no_key: bool = False) -> dict:
+ async def _build_headers(self, no_key: bool = False, is_formdata: bool = False) -> dict:
"""构建请求头"""
if no_key:
+ if is_formdata:
+ return {"Authorization": "Bearer **********"}
return {"Authorization": "Bearer **********", "Content-Type": "application/json"}
else:
+ if is_formdata:
+ return {"Authorization": f"Bearer {self.api_key}"}
return {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
# 防止小朋友们截图自己的key
@@ -761,6 +816,13 @@ class LLMRequest:
content, reasoning_content = response
return content, reasoning_content
+ async def generate_response_for_voice(self, voice_bytes: bytes) -> Tuple:
+ """根据输入的语音文件生成模型的异步响应"""
+ response = await self._execute_request(
+ endpoint="/audio/transcriptions", file_bytes=voice_bytes, file_format="wav"
+ )
+ return response
+
async def generate_response_async(self, prompt: str, **kwargs) -> Union[str, Tuple]:
"""异步方式根据输入的提示生成模型的响应"""
# 构建请求体,不硬编码max_tokens
diff --git a/src/main.py b/src/main.py
index 768913c4..aed9a2bf 100644
--- a/src/main.py
+++ b/src/main.py
@@ -2,31 +2,25 @@ import asyncio
import time
from maim_message import MessageServer
-from src.chat.express.exprssion_learner import get_expression_learner
+from src.chat.express.expression_learner import get_expression_learner
from src.common.remote import TelemetryHeartBeatTask
from src.manager.async_task_manager import async_task_manager
from src.chat.utils.statistic import OnlineTimeRecordTask, StatisticOutputTask
-from src.manager.mood_manager import MoodPrintTask, MoodUpdateTask
from src.chat.emoji_system.emoji_manager import get_emoji_manager
-from src.chat.normal_chat.willing.willing_manager import get_willing_manager
+from src.chat.willing.willing_manager import get_willing_manager
from src.chat.message_receive.chat_stream import get_chat_manager
-from src.chat.heart_flow.heartflow import heartflow
-from src.chat.message_receive.message_sender import message_manager
-from src.chat.message_receive.storage import MessageStorage
from src.config.config import global_config
from src.chat.message_receive.bot import chat_bot
from src.common.logger import get_logger
from src.individuality.individuality import get_individuality, Individuality
from src.common.server import get_global_server, Server
+from src.mood.mood_manager import mood_manager
from rich.traceback import install
# from src.api.main import start_api_server
# 导入新的插件管理器
from src.plugin_system.core.plugin_manager import plugin_manager
-# 导入HFC性能记录器用于日志清理
-from src.chat.focus_chat.hfc_performance_logger import HFCPerformanceLogger
-
# 导入消息API和traceback模块
from src.common.message import get_global_api
@@ -70,11 +64,6 @@ class MainSystem:
"""初始化其他组件"""
init_start_time = time.time()
- # 清理HFC旧日志文件(保持目录大小在50MB以内)
- logger.info("开始清理HFC旧日志文件...")
- HFCPerformanceLogger.cleanup_old_logs(max_size_mb=50.0)
- logger.info("HFC日志清理完成")
-
# 添加在线时间统计任务
await async_task_manager.add_task(OnlineTimeRecordTask())
@@ -89,25 +78,21 @@ class MainSystem:
# logger.info("API服务器启动成功")
# 加载所有actions,包括默认的和插件的
- plugin_count, component_count = plugin_manager.load_all_plugins()
- logger.info(f"插件系统加载成功: {plugin_count} 个插件,{component_count} 个组件")
+ plugin_manager.load_all_plugins()
# 初始化表情管理器
get_emoji_manager().initialize()
logger.info("表情包管理器初始化成功")
- # 添加情绪衰减任务
- await async_task_manager.add_task(MoodUpdateTask())
- # 添加情绪打印任务
- await async_task_manager.add_task(MoodPrintTask())
-
- logger.info("情绪管理器初始化成功")
-
# 启动愿望管理器
await willing_manager.async_task_starter()
logger.info("willing管理器初始化成功")
+ # 启动情绪管理器
+ await mood_manager.start()
+ logger.info("情绪管理器初始化成功")
+
# 初始化聊天管理器
await get_chat_manager()._initialize()
@@ -129,23 +114,9 @@ class MainSystem:
self.app.register_message_handler(chat_bot.message_process)
# 初始化个体特征
- await self.individuality.initialize(
- bot_nickname=global_config.bot.nickname,
- personality_core=global_config.personality.personality_core,
- personality_sides=global_config.personality.personality_sides,
- identity_detail=global_config.identity.identity_detail,
- )
- logger.info("个体特征初始化成功")
+ await self.individuality.initialize()
try:
- # 启动全局消息管理器 (负责消息发送/排队)
- await message_manager.start()
- logger.info("全局消息管理器启动成功")
-
- # 启动心流系统主循环
- asyncio.create_task(heartflow.heartflow_start_working())
- logger.info("心流系统启动成功")
-
init_time = int(1000 * (time.time() - init_start_time))
logger.info(f"初始化完成,神经元放电{init_time}次")
except Exception as e:
@@ -157,7 +128,6 @@ class MainSystem:
while True:
tasks = [
get_emoji_manager().start_periodic_check_register(),
- self.remove_recalled_message_task(),
self.app.run(),
self.server.run(),
]
@@ -181,14 +151,14 @@ class MainSystem:
while True:
await asyncio.sleep(global_config.memory.memory_build_interval)
logger.info("正在进行记忆构建")
- await self.hippocampus_manager.build_memory()
+ await self.hippocampus_manager.build_memory() # type: ignore
async def forget_memory_task(self):
"""记忆遗忘任务"""
while True:
await asyncio.sleep(global_config.memory.forget_memory_interval)
logger.info("[记忆遗忘] 开始遗忘记忆...")
- await self.hippocampus_manager.forget_memory(percentage=global_config.memory.memory_forget_percentage)
+ await self.hippocampus_manager.forget_memory(percentage=global_config.memory.memory_forget_percentage) # type: ignore
logger.info("[记忆遗忘] 记忆遗忘完成")
async def consolidate_memory_task(self):
@@ -196,7 +166,7 @@ class MainSystem:
while True:
await asyncio.sleep(global_config.memory.consolidate_memory_interval)
logger.info("[记忆整合] 开始整合记忆...")
- await self.hippocampus_manager.consolidate_memory()
+ await self.hippocampus_manager.consolidate_memory() # type: ignore
logger.info("[记忆整合] 记忆整合完成")
@staticmethod
@@ -210,23 +180,6 @@ class MainSystem:
await expression_learner.learn_and_store_expression()
logger.info("[表达方式学习] 表达方式学习完成")
- # async def print_mood_task(self):
- # """打印情绪状态"""
- # while True:
- # self.mood_manager.print_mood_status()
- # await asyncio.sleep(60)
-
- @staticmethod
- async def remove_recalled_message_task():
- """删除撤回消息任务"""
- while True:
- try:
- storage = MessageStorage()
- await storage.remove_recalled_message(time.time())
- except Exception:
- logger.exception("删除撤回消息失败")
- await asyncio.sleep(3600)
-
async def main():
"""主函数"""
diff --git a/src/mais4u/config/old/s4u_config_20250715_141713.toml b/src/mais4u/config/old/s4u_config_20250715_141713.toml
new file mode 100644
index 00000000..538fcd88
--- /dev/null
+++ b/src/mais4u/config/old/s4u_config_20250715_141713.toml
@@ -0,0 +1,36 @@
+[inner]
+version = "1.0.0"
+
+#----以下是S4U聊天系统配置文件----
+# S4U (Smart 4 U) 聊天系统是MaiBot的核心对话模块
+# 支持优先级队列、消息中断、VIP用户等高级功能
+#
+# 如果你想要修改配置文件,请在修改后将version的值进行变更
+# 如果新增项目,请参考src/mais4u/s4u_config.py中的S4UConfig类
+#
+# 版本格式:主版本号.次版本号.修订号
+#----S4U配置说明结束----
+
+[s4u]
+# 消息管理配置
+message_timeout_seconds = 120 # 普通消息存活时间(秒),超过此时间的消息将被丢弃
+recent_message_keep_count = 6 # 保留最近N条消息,超出范围的普通消息将被移除
+
+# 优先级系统配置
+at_bot_priority_bonus = 100.0 # @机器人时的优先级加成分数
+vip_queue_priority = true # 是否启用VIP队列优先级系统
+enable_message_interruption = true # 是否允许高优先级消息中断当前回复
+
+# 打字效果配置
+typing_delay = 0.1 # 打字延迟时间(秒),模拟真实打字速度
+enable_dynamic_typing_delay = false # 是否启用基于文本长度的动态打字延迟
+
+# 动态打字延迟参数(仅在enable_dynamic_typing_delay=true时生效)
+chars_per_second = 15.0 # 每秒字符数,用于计算动态打字延迟
+min_typing_delay = 0.2 # 最小打字延迟(秒)
+max_typing_delay = 2.0 # 最大打字延迟(秒)
+
+# 系统功能开关
+enable_old_message_cleanup = true # 是否自动清理过旧的普通消息
+enable_loading_indicator = true # 是否显示加载提示
+
diff --git a/src/mais4u/config/s4u_config.toml b/src/mais4u/config/s4u_config.toml
new file mode 100644
index 00000000..26fdef44
--- /dev/null
+++ b/src/mais4u/config/s4u_config.toml
@@ -0,0 +1,132 @@
+[inner]
+version = "1.1.0"
+
+#----以下是S4U聊天系统配置文件----
+# S4U (Smart 4 U) 聊天系统是MaiBot的核心对话模块
+# 支持优先级队列、消息中断、VIP用户等高级功能
+#
+# 如果你想要修改配置文件,请在修改后将version的值进行变更
+# 如果新增项目,请参考src/mais4u/s4u_config.py中的S4UConfig类
+#
+# 版本格式:主版本号.次版本号.修订号
+#----S4U配置说明结束----
+
+[s4u]
+# 消息管理配置
+message_timeout_seconds = 80 # 普通消息存活时间(秒),超过此时间的消息将被丢弃
+recent_message_keep_count = 8 # 保留最近N条消息,超出范围的普通消息将被移除
+
+# 优先级系统配置
+at_bot_priority_bonus = 100.0 # @机器人时的优先级加成分数
+vip_queue_priority = true # 是否启用VIP队列优先级系统
+enable_message_interruption = true # 是否允许高优先级消息中断当前回复
+
+# 打字效果配置
+typing_delay = 0.1 # 打字延迟时间(秒),模拟真实打字速度
+enable_dynamic_typing_delay = false # 是否启用基于文本长度的动态打字延迟
+
+# 动态打字延迟参数(仅在enable_dynamic_typing_delay=true时生效)
+chars_per_second = 15.0 # 每秒字符数,用于计算动态打字延迟
+min_typing_delay = 0.2 # 最小打字延迟(秒)
+max_typing_delay = 2.0 # 最大打字延迟(秒)
+
+# 系统功能开关
+enable_old_message_cleanup = true # 是否自动清理过旧的普通消息
+enable_loading_indicator = true # 是否显示加载提示
+
+enable_streaming_output = false # 是否启用流式输出,false时全部生成后一次性发送
+
+max_context_message_length = 30
+max_core_message_length = 20
+
+# 模型配置
+[models]
+# 主要对话模型配置
+[models.chat]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false
+
+# 规划模型配置
+[models.motion]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false
+
+# 情感分析模型配置
+[models.emotion]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 记忆模型配置
+[models.memory]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 工具使用模型配置
+[models.tool_use]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 嵌入模型配置
+[models.embedding]
+name = "text-embedding-v1"
+provider = "OPENAI"
+dimension = 1024
+
+# 视觉语言模型配置
+[models.vlm]
+name = "qwen-vl-plus"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 知识库模型配置
+[models.knowledge]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 实体提取模型配置
+[models.entity_extract]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 问答模型配置
+[models.qa]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+
+# 兼容性配置(已废弃,请使用models.motion)
+[model_motion] # 在麦麦的一些组件中使用的小模型,消耗量较大,建议使用速度较快的小模型
+# 强烈建议使用免费的小模型
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false # 是否启用思考
\ No newline at end of file
diff --git a/src/mais4u/config/s4u_config_template.toml b/src/mais4u/config/s4u_config_template.toml
new file mode 100644
index 00000000..40adb1f6
--- /dev/null
+++ b/src/mais4u/config/s4u_config_template.toml
@@ -0,0 +1,67 @@
+[inner]
+version = "1.1.0"
+
+#----以下是S4U聊天系统配置文件----
+# S4U (Smart 4 U) 聊天系统是MaiBot的核心对话模块
+# 支持优先级队列、消息中断、VIP用户等高级功能
+#
+# 如果你想要修改配置文件,请在修改后将version的值进行变更
+# 如果新增项目,请参考src/mais4u/s4u_config.py中的S4UConfig类
+#
+# 版本格式:主版本号.次版本号.修订号
+#----S4U配置说明结束----
+
+[s4u]
+# 消息管理配置
+message_timeout_seconds = 120 # 普通消息存活时间(秒),超过此时间的消息将被丢弃
+recent_message_keep_count = 6 # 保留最近N条消息,超出范围的普通消息将被移除
+
+# 优先级系统配置
+at_bot_priority_bonus = 100.0 # @机器人时的优先级加成分数
+vip_queue_priority = true # 是否启用VIP队列优先级系统
+enable_message_interruption = true # 是否允许高优先级消息中断当前回复
+
+# 打字效果配置
+typing_delay = 0.1 # 打字延迟时间(秒),模拟真实打字速度
+enable_dynamic_typing_delay = false # 是否启用基于文本长度的动态打字延迟
+
+# 动态打字延迟参数(仅在enable_dynamic_typing_delay=true时生效)
+chars_per_second = 15.0 # 每秒字符数,用于计算动态打字延迟
+min_typing_delay = 0.2 # 最小打字延迟(秒)
+max_typing_delay = 2.0 # 最大打字延迟(秒)
+
+# 系统功能开关
+enable_old_message_cleanup = true # 是否自动清理过旧的普通消息
+
+enable_streaming_output = true # 是否启用流式输出,false时全部生成后一次性发送
+
+max_context_message_length = 20
+max_core_message_length = 30
+
+# 模型配置
+[models]
+# 主要对话模型配置
+[models.chat]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false
+
+# 规划模型配置
+[models.motion]
+name = "qwen3-32b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
+enable_thinking = false
+
+# 情感分析模型配置
+[models.emotion]
+name = "qwen3-8b"
+provider = "BAILIAN"
+pri_in = 0.5
+pri_out = 2
+temp = 0.7
diff --git a/src/mais4u/constant_s4u.py b/src/mais4u/constant_s4u.py
new file mode 100644
index 00000000..8a744640
--- /dev/null
+++ b/src/mais4u/constant_s4u.py
@@ -0,0 +1 @@
+ENABLE_S4U = False
\ No newline at end of file
diff --git a/src/mais4u/mai_think.py b/src/mais4u/mai_think.py
new file mode 100644
index 00000000..867ba8be
--- /dev/null
+++ b/src/mais4u/mai_think.py
@@ -0,0 +1,183 @@
+from src.chat.message_receive.chat_stream import get_chat_manager
+import time
+from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
+from src.llm_models.utils_model import LLMRequest
+from src.config.config import global_config
+from src.chat.message_receive.message import MessageRecvS4U
+from src.mais4u.mais4u_chat.s4u_msg_processor import S4UMessageProcessor
+from src.mais4u.mais4u_chat.internal_manager import internal_manager
+from src.common.logger import get_logger
+logger = get_logger(__name__)
+
+def init_prompt():
+ Prompt(
+ """
+你之前的内心想法是:{mind}
+
+{memory_block}
+{relation_info_block}
+
+{chat_target}
+{time_block}
+{chat_info}
+{identity}
+
+你刚刚在{chat_target_2},你你刚刚的心情是:{mood_state}
+---------------------
+在这样的情况下,你对上面的内容,你对 {sender} 发送的 消息 “{target}” 进行了回复
+你刚刚选择回复的内容是:{reponse}
+现在,根据你之前的想法和回复的内容,推测你现在的想法,思考你现在的想法是什么,为什么做出上面的回复内容
+请不要浮夸和夸张修辞,不要输出多余内容(包括前后缀,冒号和引号,括号(),表情包,at或 @等 )。只输出想法:""",
+ "after_response_think_prompt",
+ )
+
+
+
+
+class MaiThinking:
+ def __init__(self,chat_id):
+ self.chat_id = chat_id
+ self.chat_stream = get_chat_manager().get_stream(chat_id)
+ self.platform = self.chat_stream.platform
+
+ if self.chat_stream.group_info:
+ self.is_group = True
+ else:
+ self.is_group = False
+
+ self.s4u_message_processor = S4UMessageProcessor()
+
+ self.mind = ""
+
+ self.memory_block = ""
+ self.relation_info_block = ""
+ self.time_block = ""
+ self.chat_target = ""
+ self.chat_target_2 = ""
+ self.chat_info = ""
+ self.mood_state = ""
+ self.identity = ""
+ self.sender = ""
+ self.target = ""
+
+ self.thinking_model = LLMRequest(
+ model=global_config.model.replyer_1,
+ request_type="thinking",
+ )
+
+ async def do_think_before_response(self):
+ pass
+
+ async def do_think_after_response(self,reponse:str):
+
+ prompt = await global_prompt_manager.format_prompt(
+ "after_response_think_prompt",
+ mind=self.mind,
+ reponse=reponse,
+ memory_block=self.memory_block,
+ relation_info_block=self.relation_info_block,
+ time_block=self.time_block,
+ chat_target=self.chat_target,
+ chat_target_2=self.chat_target_2,
+ chat_info=self.chat_info,
+ mood_state=self.mood_state,
+ identity=self.identity,
+ sender=self.sender,
+ target=self.target,
+ )
+
+ result, _ = await self.thinking_model.generate_response_async(prompt)
+ self.mind = result
+
+ logger.info(f"[{self.chat_id}] 思考前想法:{self.mind}")
+ # logger.info(f"[{self.chat_id}] 思考前prompt:{prompt}")
+ logger.info(f"[{self.chat_id}] 思考后想法:{self.mind}")
+
+
+ msg_recv = await self.build_internal_message_recv(self.mind)
+ await self.s4u_message_processor.process_message(msg_recv)
+ internal_manager.set_internal_state(self.mind)
+
+
+ async def do_think_when_receive_message(self):
+ pass
+
+ async def build_internal_message_recv(self,message_text:str):
+
+ msg_id = f"internal_{time.time()}"
+
+ message_dict = {
+ "message_info": {
+ "message_id": msg_id,
+ "time": time.time(),
+ "user_info": {
+ "user_id": "internal", # 内部用户ID
+ "user_nickname": "内心", # 内部昵称
+ "platform": self.platform, # 平台标记为 internal
+ # 其他 user_info 字段按需补充
+ },
+ "platform": self.platform, # 平台
+ # 其他 message_info 字段按需补充
+ },
+ "message_segment": {
+ "type": "text", # 消息类型
+ "data": message_text, # 消息内容
+ # 其他 segment 字段按需补充
+ },
+ "raw_message": message_text, # 原始消息内容
+ "processed_plain_text": message_text, # 处理后的纯文本
+ # 下面这些字段可选,根据 MessageRecv 需要
+ "is_emoji": False,
+ "has_emoji": False,
+ "is_picid": False,
+ "has_picid": False,
+ "is_voice": False,
+ "is_mentioned": False,
+ "is_command": False,
+ "is_internal": True,
+ "priority_mode": "interest",
+ "priority_info": {"message_priority": 10.0}, # 内部消息可设高优先级
+ "interest_value": 1.0,
+ }
+
+ if self.is_group:
+ message_dict["message_info"]["group_info"] = {
+ "platform": self.platform,
+ "group_id": self.chat_stream.group_info.group_id,
+ "group_name": self.chat_stream.group_info.group_name,
+ }
+
+ msg_recv = MessageRecvS4U(message_dict)
+ msg_recv.chat_info = self.chat_info
+ msg_recv.chat_stream = self.chat_stream
+ msg_recv.is_internal = True
+
+ return msg_recv
+
+
+
+
+class MaiThinkingManager:
+ def __init__(self):
+ self.mai_think_list = []
+
+ def get_mai_think(self,chat_id):
+ for mai_think in self.mai_think_list:
+ if mai_think.chat_id == chat_id:
+ return mai_think
+ mai_think = MaiThinking(chat_id)
+ self.mai_think_list.append(mai_think)
+ return mai_think
+
+mai_thinking_manager = MaiThinkingManager()
+
+
+init_prompt()
+
+
+
+
+
+
+
+
diff --git a/src/mais4u/mais4u_chat/body_emotion_action_manager.py b/src/mais4u/mais4u_chat/body_emotion_action_manager.py
new file mode 100644
index 00000000..e7380822
--- /dev/null
+++ b/src/mais4u/mais4u_chat/body_emotion_action_manager.py
@@ -0,0 +1,315 @@
+import json
+import time
+from src.chat.message_receive.message import MessageRecv
+from src.llm_models.utils_model import LLMRequest
+from src.common.logger import get_logger
+from src.chat.utils.chat_message_builder import build_readable_messages, get_raw_msg_by_timestamp_with_chat_inclusive
+from src.config.config import global_config
+from src.chat.utils.prompt_builder import Prompt, global_prompt_manager
+from src.manager.async_task_manager import AsyncTask, async_task_manager
+from src.plugin_system.apis import send_api
+from json_repair import repair_json
+from src.mais4u.s4u_config import s4u_config
+
+logger = get_logger("action")
+
+HEAD_CODE = {
+ "看向上方": "(0,0.5,0)",
+ "看向下方": "(0,-0.5,0)",
+ "看向左边": "(-1,0,0)",
+ "看向右边": "(1,0,0)",
+ "随意朝向": "random",
+ "看向摄像机": "camera",
+ "注视对方": "(0,0,0)",
+ "看向正前方": "(0,0,0)",
+}
+
+BODY_CODE = {
+ "双手背后向前弯腰": "010_0070",
+ "歪头双手合十": "010_0100",
+ "标准文静站立": "010_0101",
+ "双手交叠腹部站立": "010_0150",
+ "帅气的姿势": "010_0190",
+ "另一个帅气的姿势": "010_0191",
+ "手掌朝前可爱": "010_0210",
+ "平静,双手后放":"平静,双手后放",
+ "思考": "思考",
+ "优雅,左手放在腰上": "优雅,左手放在腰上",
+ "一般": "一般",
+ "可爱,双手前放": "可爱,双手前放",
+}
+
+
+def init_prompt():
+ Prompt(
+ """
+{chat_talking_prompt}
+以上是群里正在进行的聊天记录
+
+{indentify_block}
+你现在的动作状态是:
+- 身体动作:{body_action}
+
+现在,因为你发送了消息,或者群里其他人发送了消息,引起了你的注意,你对其进行了阅读和思考,请你更新你的动作状态。
+身体动作可选:
+{all_actions}
+
+请只按照以下json格式输出,描述你新的动作状态,确保每个字段都存在:
+{{
+ "body_action": "..."
+}}
+""",
+ "change_action_prompt",
+ )
+ Prompt(
+ """
+{chat_talking_prompt}
+以上是群里最近的聊天记录
+
+{indentify_block}
+你之前的动作状态是
+- 身体动作:{body_action}
+
+身体动作可选:
+{all_actions}
+
+距离你上次关注群里消息已经过去了一段时间,你冷静了下来,你的动作会趋于平缓或静止,请你输出你现在新的动作状态,用中文。
+请只按照以下json格式输出,描述你新的动作状态,确保每个字段都存在:
+{{
+ "body_action": "..."
+}}
+""",
+ "regress_action_prompt",
+ )
+
+
+class ChatAction:
+ def __init__(self, chat_id: str):
+ self.chat_id: str = chat_id
+ self.body_action: str = "一般"
+ self.head_action: str = "注视摄像机"
+
+ self.regression_count: int = 0
+ # 新增:body_action冷却池,key为动作名,value为剩余冷却次数
+ self.body_action_cooldown: dict[str, int] = {}
+
+ print(s4u_config.models.motion)
+ print(global_config.model.emotion)
+
+ self.action_model = LLMRequest(
+ model=global_config.model.emotion,
+ temperature=0.7,
+ request_type="motion",
+ )
+
+ self.last_change_time = 0
+
+ async def send_action_update(self):
+ """发送动作更新到前端"""
+
+ body_code = BODY_CODE.get(self.body_action, "")
+ await send_api.custom_to_stream(
+ message_type="body_action",
+ content=body_code,
+ stream_id=self.chat_id,
+ storage_message=False,
+ show_log=True,
+ )
+
+
+
+ async def update_action_by_message(self, message: MessageRecv):
+ self.regression_count = 0
+
+ message_time = message.message_info.time
+ message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
+ chat_id=self.chat_id,
+ timestamp_start=self.last_change_time,
+ timestamp_end=message_time,
+ limit=15,
+ limit_mode="last",
+ )
+ chat_talking_prompt = build_readable_messages(
+ message_list_before_now,
+ replace_bot_name=True,
+ merge_messages=False,
+ timestamp_mode="normal_no_YMD",
+ read_mark=0.0,
+ truncate=True,
+ show_actions=True,
+ )
+
+ bot_name = global_config.bot.nickname
+ if global_config.bot.alias_names:
+ bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
+ else:
+ bot_nickname = ""
+
+ prompt_personality = global_config.personality.personality_core
+ indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
+
+ try:
+ # 冷却池处理:过滤掉冷却中的动作
+ self._update_body_action_cooldown()
+ available_actions = [k for k in BODY_CODE.keys() if k not in self.body_action_cooldown]
+ all_actions = "\n".join(available_actions)
+
+ prompt = await global_prompt_manager.format_prompt(
+ "change_action_prompt",
+ chat_talking_prompt=chat_talking_prompt,
+ indentify_block=indentify_block,
+ body_action=self.body_action,
+ all_actions=all_actions,
+ )
+
+ logger.info(f"prompt: {prompt}")
+ response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
+ logger.info(f"response: {response}")
+ logger.info(f"reasoning_content: {reasoning_content}")
+
+ action_data = json.loads(repair_json(response))
+
+ if action_data:
+ # 记录原动作,切换后进入冷却
+ prev_body_action = self.body_action
+ new_body_action = action_data.get("body_action", self.body_action)
+ if new_body_action != prev_body_action:
+ if prev_body_action:
+ self.body_action_cooldown[prev_body_action] = 3
+ self.body_action = new_body_action
+ self.head_action = action_data.get("head_action", self.head_action)
+ # 发送动作更新
+ await self.send_action_update()
+
+ self.last_change_time = message_time
+ except Exception as e:
+ logger.error(f"update_action_by_message error: {e}")
+
+ async def regress_action(self):
+ message_time = time.time()
+ message_list_before_now = get_raw_msg_by_timestamp_with_chat_inclusive(
+ chat_id=self.chat_id,
+ timestamp_start=self.last_change_time,
+ timestamp_end=message_time,
+ limit=10,
+ limit_mode="last",
+ )
+ chat_talking_prompt = build_readable_messages(
+ message_list_before_now,
+ replace_bot_name=True,
+ merge_messages=False,
+ timestamp_mode="normal_no_YMD",
+ read_mark=0.0,
+ truncate=True,
+ show_actions=True,
+ )
+
+ bot_name = global_config.bot.nickname
+ if global_config.bot.alias_names:
+ bot_nickname = f",也有人叫你{','.join(global_config.bot.alias_names)}"
+ else:
+ bot_nickname = ""
+
+ prompt_personality = global_config.personality.personality_core
+ indentify_block = f"你的名字是{bot_name}{bot_nickname},你{prompt_personality}:"
+ try:
+
+ # 冷却池处理:过滤掉冷却中的动作
+ self._update_body_action_cooldown()
+ available_actions = [k for k in BODY_CODE.keys() if k not in self.body_action_cooldown]
+ all_actions = "\n".join(available_actions)
+
+ prompt = await global_prompt_manager.format_prompt(
+ "regress_action_prompt",
+ chat_talking_prompt=chat_talking_prompt,
+ indentify_block=indentify_block,
+ body_action=self.body_action,
+ all_actions=all_actions,
+ )
+
+ logger.info(f"prompt: {prompt}")
+ response, (reasoning_content, model_name) = await self.action_model.generate_response_async(prompt=prompt)
+ logger.info(f"response: {response}")
+ logger.info(f"reasoning_content: {reasoning_content}")
+
+ action_data = json.loads(repair_json(response))
+ if action_data:
+ prev_body_action = self.body_action
+ new_body_action = action_data.get("body_action", self.body_action)
+ if new_body_action != prev_body_action:
+ if prev_body_action:
+ self.body_action_cooldown[prev_body_action] = 6
+ self.body_action = new_body_action
+ # 发送动作更新
+ await self.send_action_update()
+
+ self.regression_count += 1
+ self.last_change_time = message_time
+ except Exception as e:
+ logger.error(f"regress_action error: {e}")
+
+ # 新增:冷却池维护方法
+ def _update_body_action_cooldown(self):
+ remove_keys = []
+ for k in self.body_action_cooldown:
+ self.body_action_cooldown[k] -= 1
+ if self.body_action_cooldown[k] <= 0:
+ remove_keys.append(k)
+ for k in remove_keys:
+ del self.body_action_cooldown[k]
+
+
+class ActionRegressionTask(AsyncTask):
+ def __init__(self, action_manager: "ActionManager"):
+ super().__init__(task_name="ActionRegressionTask", run_interval=3)
+ self.action_manager = action_manager
+
+ async def run(self):
+ logger.debug("Running action regression task...")
+ now = time.time()
+ for action_state in self.action_manager.action_state_list:
+ if action_state.last_change_time == 0:
+ continue
+
+ if now - action_state.last_change_time > 10:
+ if action_state.regression_count >= 3:
+ continue
+
+ logger.info(f"chat {action_state.chat_id} 开始动作回归, 这是第 {action_state.regression_count + 1} 次")
+ await action_state.regress_action()
+
+
+class ActionManager:
+ def __init__(self):
+ self.action_state_list: list[ChatAction] = []
+ """当前动作状态"""
+ self.task_started: bool = False
+
+ async def start(self):
+ """启动动作回归后台任务"""
+ if self.task_started:
+ return
+
+ logger.info("启动动作回归任务...")
+ task = ActionRegressionTask(self)
+ await async_task_manager.add_task(task)
+ self.task_started = True
+ logger.info("动作回归任务已启动")
+
+ def get_action_state_by_chat_id(self, chat_id: str) -> ChatAction:
+ for action_state in self.action_state_list:
+ if action_state.chat_id == chat_id:
+ return action_state
+
+ new_action_state = ChatAction(chat_id)
+ self.action_state_list.append(new_action_state)
+ return new_action_state
+
+
+
+
+
+init_prompt()
+
+action_manager = ActionManager()
+"""全局动作管理器"""
diff --git a/src/mais4u/mais4u_chat/context_web_manager.py b/src/mais4u/mais4u_chat/context_web_manager.py
new file mode 100644
index 00000000..8c6cde2c
--- /dev/null
+++ b/src/mais4u/mais4u_chat/context_web_manager.py
@@ -0,0 +1,685 @@
+import asyncio
+import json
+from collections import deque
+from datetime import datetime
+from typing import Dict, List, Optional
+from aiohttp import web, WSMsgType
+import aiohttp_cors
+
+from src.chat.message_receive.message import MessageRecv
+from src.common.logger import get_logger
+
+logger = get_logger("context_web")
+
+
+class ContextMessage:
+ """上下文消息类"""
+
+ def __init__(self, message: MessageRecv):
+ self.user_name = message.message_info.user_info.user_nickname
+ self.user_id = message.message_info.user_info.user_id
+ self.content = message.processed_plain_text
+ self.timestamp = datetime.now()
+ self.group_name = message.message_info.group_info.group_name if message.message_info.group_info else "私聊"
+
+ # 识别消息类型
+ self.is_gift = getattr(message, 'is_gift', False)
+ self.is_superchat = getattr(message, 'is_superchat', False)
+
+ # 添加礼物和SC相关信息
+ if self.is_gift:
+ self.gift_name = getattr(message, 'gift_name', '')
+ self.gift_count = getattr(message, 'gift_count', '1')
+ self.content = f"送出了 {self.gift_name} x{self.gift_count}"
+ elif self.is_superchat:
+ self.superchat_price = getattr(message, 'superchat_price', '0')
+ self.superchat_message = getattr(message, 'superchat_message_text', '')
+ if self.superchat_message:
+ self.content = f"[¥{self.superchat_price}] {self.superchat_message}"
+ else:
+ self.content = f"[¥{self.superchat_price}] {self.content}"
+
+ def to_dict(self):
+ return {
+ "user_name": self.user_name,
+ "user_id": self.user_id,
+ "content": self.content,
+ "timestamp": self.timestamp.strftime("%m-%d %H:%M:%S"),
+ "group_name": self.group_name,
+ "is_gift": self.is_gift,
+ "is_superchat": self.is_superchat
+ }
+
+
+class ContextWebManager:
+ """上下文网页管理器"""
+
+ def __init__(self, max_messages: int = 10, port: int = 8765):
+ self.max_messages = max_messages
+ self.port = port
+ self.contexts: Dict[str, deque] = {} # chat_id -> deque of ContextMessage
+ self.websockets: List[web.WebSocketResponse] = []
+ self.app = None
+ self.runner = None
+ self.site = None
+ self._server_starting = False # 添加启动标志防止并发
+
+ async def start_server(self):
+ """启动web服务器"""
+ if self.site is not None:
+ logger.debug("Web服务器已经启动,跳过重复启动")
+ return
+
+ if self._server_starting:
+ logger.debug("Web服务器正在启动中,等待启动完成...")
+ # 等待启动完成
+ while self._server_starting and self.site is None:
+ await asyncio.sleep(0.1)
+ return
+
+ self._server_starting = True
+
+ try:
+ self.app = web.Application()
+
+ # 设置CORS
+ cors = aiohttp_cors.setup(self.app, defaults={
+ "*": aiohttp_cors.ResourceOptions(
+ allow_credentials=True,
+ expose_headers="*",
+ allow_headers="*",
+ allow_methods="*"
+ )
+ })
+
+ # 添加路由
+ self.app.router.add_get('/', self.index_handler)
+ self.app.router.add_get('/ws', self.websocket_handler)
+ self.app.router.add_get('/api/contexts', self.get_contexts_handler)
+ self.app.router.add_get('/debug', self.debug_handler)
+
+ # 为所有路由添加CORS
+ for route in list(self.app.router.routes()):
+ cors.add(route)
+
+ self.runner = web.AppRunner(self.app)
+ await self.runner.setup()
+
+ self.site = web.TCPSite(self.runner, 'localhost', self.port)
+ await self.site.start()
+
+ logger.info(f"🌐 上下文网页服务器启动成功在 http://localhost:{self.port}")
+
+ except Exception as e:
+ logger.error(f"❌ 启动Web服务器失败: {e}")
+ # 清理部分启动的资源
+ if self.runner:
+ await self.runner.cleanup()
+ self.app = None
+ self.runner = None
+ self.site = None
+ raise
+ finally:
+ self._server_starting = False
+
+ async def stop_server(self):
+ """停止web服务器"""
+ if self.site:
+ await self.site.stop()
+ if self.runner:
+ await self.runner.cleanup()
+ self.app = None
+ self.runner = None
+ self.site = None
+ self._server_starting = False
+
+ async def index_handler(self, request):
+ """主页处理器"""
+ html_content = '''
+
+
+
+