Merge remote-tracking branch 'upstream/dev' into dev

This commit is contained in:
DawnARC
2026-05-02 21:46:04 +08:00
31 changed files with 366 additions and 185 deletions

View File

@@ -30,9 +30,6 @@ jobs:
# - name: Clone maim_message
# run: git clone https://github.com/MaiM-with-u/maim_message maim_message
- name: Clone lpmm
run: git clone https://github.com/Mai-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
@@ -84,9 +81,6 @@ jobs:
# - name: Clone maim_message
# run: git clone https://github.com/MaiM-with-u/maim_message maim_message
- name: Clone lpmm
run: git clone https://github.com/Mai-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:

View File

@@ -34,9 +34,6 @@ jobs:
# - name: Clone maim_message
# run: git clone https://github.com/MaiM-with-u/maim_message maim_message
- name: Clone lpmm
run: git clone https://github.com/Mai-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
@@ -87,9 +84,6 @@ jobs:
# - name: Clone maim_message
# run: git clone https://github.com/MaiM-with-u/maim_message maim_message
- name: Clone lpmm
run: git clone https://github.com/Mai-with-u/MaiMBot-LPMM.git MaiMBot-LPMM
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:

View File

@@ -1,44 +1,27 @@
# 编译 LPMM
FROM python:3.13-slim AS lpmm-builder
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
WORKDIR /MaiMBot-LPMM
# 同级目录下需要有 MaiMBot-LPMM
COPY MaiMBot-LPMM /MaiMBot-LPMM
# 安装编译器和编译依赖
RUN apt-get update && apt-get install -y build-essential
RUN uv pip install --system --upgrade pip
RUN cd /MaiMBot-LPMM && uv pip install --system -r requirements.txt
# 编译 LPMM
RUN cd /MaiMBot-LPMM/lib/quick_algo && python build_lib.py --cleanup --cythonize --install
# 运行环境
# Runtime image
FROM python:3.13-slim
COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
# 工作目录
# Working directory
WORKDIR /MaiMBot
ENV MAIBOT_LEGACY_0X_UPGRADE_CONFIRMED=1
# 复制依赖列表
# Copy dependency list
COPY requirements.txt .
RUN apt-get update && apt-get install -y git
# 从编译阶段复制 LPMM 编译结果
COPY --from=lpmm-builder /usr/local/lib/python3.13/site-packages/ /usr/local/lib/python3.13/site-packages/
# 安装运行时依赖
# Install runtime dependencies
RUN uv pip install --system --upgrade pip
RUN uv pip install --system -r requirements.txt
# 复制项目代码
# Copy project source
COPY . .
RUN git clone --depth 1 --branch plugin https://github.com/Mai-with-u/MaiBot-Napcat-Adapter.git plugin-templates/MaiBot-Napcat-Adapter
RUN chmod +x docker-entrypoint.sh
EXPOSE 8000
ENTRYPOINT [ "python","bot.py" ]
ENTRYPOINT [ "./docker-entrypoint.sh" ]

View File

@@ -1,7 +1,7 @@
{
"name": "maibot-dashboard",
"private": true,
"version": "1.0.1",
"version": "1.0.2",
"type": "module",
"main": "./out/main/index.js",
"scripts": {

View File

@@ -29,7 +29,7 @@ export function Sidebar({
return (
<aside
className={cn(
'fixed inset-y-0 left-0 z-50 isolate flex flex-col border-r transition-all duration-300 lg:relative lg:z-0',
'fixed inset-y-0 left-0 z-50 isolate flex flex-col border-r transition-all duration-300 lg:relative lg:z-0 lg:h-full',
inheritsPageBackground ? 'bg-transparent' : 'bg-card',
// 移动端始终显示完整宽度,桌面端根据 sidebarOpen 切换
'w-64 lg:w-auto',
@@ -46,9 +46,11 @@ export function Sidebar({
<ScrollArea className={cn(
'relative z-10',
"flex-1 overflow-x-hidden",
"min-h-0 flex-1 overflow-x-hidden",
!sidebarOpen && "lg:w-16"
)}>
)}
viewportClassName="[&>div]:!block"
>
<nav
aria-label={t('a11y.sidebarNav')}
className={cn(

View File

@@ -6,7 +6,9 @@ import { useTour } from './use-tour'
// Joyride 主题配置
const joyrideStyles = {
options: {
zIndex: 10000,
// 提到 portal 容器99999之上确保 overlay/spotlight/tooltip 都在最上层;
// overlay 的 z-index 由 react-joyride 内部基于 options.zIndex 推算,必须大于 floater 才能让 tooltip 按钮可点击。
zIndex: 100000,
primaryColor: 'hsl(var(--color-primary))',
textColor: 'hsl(var(--color-foreground))',
backgroundColor: 'hsl(var(--color-background))',
@@ -197,13 +199,6 @@ export function TourRenderer() {
locale={locale}
scrollOffset={80}
scrollToFirstStep
floaterProps={{
styles: {
floater: {
zIndex: 99999,
},
},
}}
/>
)

View File

@@ -54,7 +54,7 @@ const DialogContent = React.forwardRef<
<DialogPrimitive.Content
ref={ref}
className={cn(
"fixed left-[50%] top-[50%] z-50 grid w-[min(calc(100vw-2rem),var(--dialog-width,32rem))] max-h-[calc(100vh-2rem)] translate-x-[-50%] translate-y-[-50%] gap-4 overflow-hidden border bg-background p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%] data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%] sm:rounded-lg",
"fixed left-[50%] top-[50%] z-50 flex w-[min(calc(100vw-2rem),var(--dialog-width,32rem))] max-h-[calc(100vh-2rem)] translate-x-[-50%] translate-y-[-50%] flex-col gap-4 overflow-hidden border bg-background p-6 shadow-lg duration-200 data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[state=closed]:slide-out-to-left-1/2 data-[state=closed]:slide-out-to-top-[48%] data-[state=open]:slide-in-from-left-1/2 data-[state=open]:slide-in-from-top-[48%] sm:rounded-lg",
className
)}
onPointerDownOutside={preventOutsideClose ? (e) => e.preventDefault() : undefined}
@@ -94,13 +94,17 @@ const DialogContent = React.forwardRef<
DialogContent.displayName = DialogPrimitive.Content.displayName
const DialogBody = React.forwardRef<HTMLDivElement, DialogBodyProps>(
({ className, children, allowHorizontalScroll = false, contentClassName, scrollbars, viewportClassName, ...props }, ref) => (
({ className, children, allowHorizontalScroll = false, contentClassName, scrollbars, viewportClassName, type, ...props }, ref) => (
// 关键:在 flex-col 的 DialogContent 中DialogBody 既要在内容多时撑到 max-h 上限并滚动,
// 又要在内容少时让 dialog 自然收缩。直接在 ScrollArea Root 上 flex-1 + min-h-0 即可:
// Radix Viewport 内部 wrapper 默认 display:table 会撑开自然高度,所以需要强制 block。
<ScrollArea
ref={ref as never}
className={cn("min-h-0 flex-1", className)}
className={cn("min-h-0 flex-1 flex flex-col", className)}
contentClassName={cn(allowHorizontalScroll && "min-w-full w-max", contentClassName)}
scrollbars={scrollbars ?? (allowHorizontalScroll ? "both" : "vertical")}
viewportClassName={cn("pr-4", viewportClassName)}
viewportClassName={cn("min-h-0 flex-1 pr-4 [&>div]:!block", viewportClassName)}
type={type ?? "always"}
{...props}
>
{children}

View File

@@ -19,7 +19,10 @@ const ScrollArea = React.forwardRef<
className={cn("relative overflow-hidden", className)}
{...props}
>
<ScrollAreaPrimitive.Viewport ref={viewportRef} className={cn("h-full w-full rounded-[inherit]", viewportClassName)}>
<ScrollAreaPrimitive.Viewport
ref={viewportRef}
className={cn("h-full w-full rounded-[inherit]", viewportClassName)}
>
<div className={contentClassName}>{children}</div>
</ScrollAreaPrimitive.Viewport>
{scrollbars !== "horizontal" && <ScrollBar />}

View File

@@ -158,7 +158,14 @@ export async function fetchProviderModels(
endpoint,
})
const response = await fetchWithAuth(`/api/webui/models/list?${params}`)
return parseResponse<ModelListItem[]>(response)
// 后端返回 { success, models, provider, count },需要展开取出 models 数组
const parsed = await parseResponse<{ models?: ModelListItem[] } | ModelListItem[]>(response)
if (!parsed.success) {
return parsed
}
const body = parsed.data
const models = Array.isArray(body) ? body : Array.isArray(body?.models) ? body.models : []
return { success: true, data: models }
}
/**

View File

@@ -5,7 +5,7 @@
* 修改此处的版本号后,所有展示版本的地方都会自动更新
*/
export const APP_VERSION = '1.0.1'
export const APP_VERSION = '1.0.2'
export const APP_NAME = 'MaiBot Dashboard'
export const APP_FULL_NAME = `${APP_NAME} v${APP_VERSION}`

View File

@@ -85,11 +85,11 @@ const modelConfigRoute = createRoute({
component: lazyRouteComponent(() => import('./routes/config/model'), 'ModelConfigPage'),
})
// 配置路由 - 麦麦适配器配置
// 配置路由 - 麦麦适配器配置(已停用,引导跳转到插件配置;旧实现保留在 ./routes/config/adapter
const adapterConfigRoute = createRoute({
getParentRoute: () => protectedRoute,
path: '/config/adapter',
component: lazyRouteComponent(() => import('./routes/config/adapter'), 'AdapterConfigPage'),
component: lazyRouteComponent(() => import('./routes/config/adapter-disabled'), 'AdapterConfigPage'),
})
// 资源管理路由 - 表情包管理

View File

@@ -0,0 +1,60 @@
import { Link } from '@tanstack/react-router'
import { ArrowRight, Info } from 'lucide-react'
import { Alert, AlertDescription, AlertTitle } from '@/components/ui/alert'
import { Button } from '@/components/ui/button'
import {
Card,
CardContent,
CardDescription,
CardHeader,
CardTitle,
} from '@/components/ui/card'
import { ScrollArea } from '@/components/ui/scroll-area'
/**
* 麦麦适配器配置 —— 禁用页
*
* 原页面({@link import('./adapter').AdapterConfigPage})的能力已迁移至
* 「插件配置」中的对应适配器插件。这里保留路由占位并引导用户跳转,
* 避免误用旧的 TOML 直接编辑路径。
*/
export function AdapterConfigPage() {
return (
<ScrollArea className="h-full">
<div className="mx-auto w-full max-w-3xl space-y-4 p-4 sm:space-y-6 sm:p-6">
<div>
<h1 className="text-2xl font-bold sm:text-3xl"></h1>
<p className="text-muted-foreground mt-1 text-sm sm:mt-2 sm:text-base">
</p>
</div>
<Alert>
<Info className="h-4 w-4" />
<AlertTitle></AlertTitle>
<AlertDescription>
Napcat
</AlertDescription>
</Alert>
<Card>
<CardHeader>
<CardTitle></CardTitle>
<CardDescription>
TOML
</CardDescription>
</CardHeader>
<CardContent>
<Button asChild>
<Link to="/plugin-config">
<ArrowRight className="ml-2 h-4 w-4" />
</Link>
</Button>
</CardContent>
</Card>
</div>
</ScrollArea>
)
}

View File

@@ -138,7 +138,11 @@ export function ProviderForm({
</DialogDescription>
</DialogHeader>
<form onSubmit={(e) => { e.preventDefault(); handleSaveEdit(); }} autoComplete="off">
<form
onSubmit={(e) => { e.preventDefault(); handleSaveEdit(); }}
autoComplete="off"
className="contents"
>
<DialogBody>
<div className="grid gap-4 py-4">
<div className="grid gap-2" data-tour="provider-template-select">

View File

@@ -16,6 +16,7 @@ interface InstalledTabProps {
checkPluginCompatibility: (plugin: PluginInfo) => boolean
needsUpdate: (plugin: PluginInfo) => boolean
getStatusBadge: (plugin: PluginInfo) => React.JSX.Element | null
getIncompatibleReason: (plugin: PluginInfo) => string | null
}
export function InstalledTab({
@@ -33,6 +34,7 @@ export function InstalledTab({
checkPluginCompatibility,
needsUpdate,
getStatusBadge,
getIncompatibleReason,
}: InstalledTabProps) {
// 过滤已安装插件
const filteredPlugins = plugins.filter(plugin => {
@@ -80,6 +82,7 @@ export function InstalledTab({
checkPluginCompatibility={checkPluginCompatibility}
needsUpdate={needsUpdate}
getStatusBadge={getStatusBadge}
getIncompatibleReason={getIncompatibleReason}
/>
))}
</div>

View File

@@ -16,6 +16,7 @@ interface MarketplaceTabProps {
checkPluginCompatibility: (plugin: PluginInfo) => boolean
needsUpdate: (plugin: PluginInfo) => boolean
getStatusBadge: (plugin: PluginInfo) => React.JSX.Element | null
getIncompatibleReason: (plugin: PluginInfo) => string | null
}
export function MarketplaceTab({
@@ -33,6 +34,7 @@ export function MarketplaceTab({
checkPluginCompatibility,
needsUpdate,
getStatusBadge,
getIncompatibleReason,
}: MarketplaceTabProps) {
// 过滤插件
const filteredPlugins = plugins.filter(plugin => {
@@ -76,6 +78,7 @@ export function MarketplaceTab({
checkPluginCompatibility={checkPluginCompatibility}
needsUpdate={needsUpdate}
getStatusBadge={getStatusBadge}
getIncompatibleReason={getIncompatibleReason}
/>
))}
</div>

View File

@@ -20,6 +20,7 @@ interface PluginCardProps {
checkPluginCompatibility: (plugin: PluginInfo) => boolean
needsUpdate: (plugin: PluginInfo) => boolean
getStatusBadge: (plugin: PluginInfo) => React.JSX.Element | null
getIncompatibleReason: (plugin: PluginInfo) => string | null
}
export function PluginCard({
@@ -34,6 +35,7 @@ export function PluginCard({
checkPluginCompatibility,
needsUpdate,
getStatusBadge,
getIncompatibleReason,
}: PluginCardProps) {
const navigate = useNavigate()
@@ -114,8 +116,14 @@ export function PluginCard({
needsUpdate(plugin) ? (
<Button
size="sm"
disabled={!gitStatus?.installed}
title={!gitStatus?.installed ? 'Git 未安装' : undefined}
disabled={!gitStatus?.installed || (maimaiVersion !== null && !checkPluginCompatibility(plugin))}
title={
!gitStatus?.installed
? 'Git 未安装'
: (maimaiVersion !== null && !checkPluginCompatibility(plugin))
? (getIncompatibleReason(plugin) ?? '插件与当前麦麦版本不兼容')
: undefined
}
onClick={() => onUpdate(plugin)}
>
<RefreshCw className="h-4 w-4 mr-1" />
@@ -145,7 +153,7 @@ export function PluginCard({
!gitStatus?.installed
? 'Git 未安装'
: (maimaiVersion !== null && !checkPluginCompatibility(plugin))
? `不兼容当前版本 (需要 ${plugin.manifest?.host_application?.min_version || '未知'}${plugin.manifest?.host_application?.max_version ? ` - ${plugin.manifest.host_application.max_version}` : '+'},当前 ${maimaiVersion?.version})`
? (getIncompatibleReason(plugin) ?? '插件与当前麦麦版本不兼容')
: undefined
}
onClick={() => onInstall(plugin)}

View File

@@ -268,8 +268,8 @@ function PluginsPageContent() {
// 获取插件状态徽章
const getStatusBadge = (plugin: PluginInfo) => {
// 优先显示兼容性状态
if (!plugin.installed && maimaiVersion && !checkPluginCompatibility(plugin)) {
// 优先显示兼容性状态(已安装但不兼容也需要提示,避免用户误以为可继续更新)
if (maimaiVersion && !checkPluginCompatibility(plugin)) {
return (
<Badge variant="destructive" className="gap-1">
<AlertCircle className="h-3 w-3" />
@@ -317,8 +317,19 @@ function PluginsPageContent() {
}
// 检查插件兼容性
// 规则:
// 1. manifest_version === 1 的插件在麦麦 >= 1.0.0 时一律视为不兼容(旧 manifest 已不再被宿主接受);
// 2. 否则若声明了 host_application 范围,则按版本范围判定。
const checkPluginCompatibility = (plugin: PluginInfo): boolean => {
if (!maimaiVersion || !plugin.manifest?.host_application) return true
if (!maimaiVersion) return true
// manifest v1 在 1.0.0+ 麦麦上不再兼容
const manifestVersion = plugin.manifest?.manifest_version ?? 1
if (manifestVersion <= 1 && maimaiVersion.version_major >= 1) {
return false
}
if (!plugin.manifest?.host_application) return true
return isPluginCompatible(
plugin.manifest.host_application.min_version,
@@ -327,11 +338,35 @@ function PluginsPageContent() {
)
}
// 不兼容原因(用于 UI 提示)
const getIncompatibleReason = (plugin: PluginInfo): string | null => {
  // No host version known — nothing to compare against.
  if (!maimaiVersion) return null
  // Manifest v1 plugins are rejected outright on host major version >= 1;
  // a missing manifest_version is treated as v1.
  const manifestVersion = plugin.manifest?.manifest_version ?? 1
  if (manifestVersion <= 1 && maimaiVersion.version_major >= 1) {
    return `该插件使用旧版 manifest (v${manifestVersion}),已不被麦麦 ${maimaiVersion.version} 支持`
  }
  // Otherwise check the declared host_application version range, if any.
  if (plugin.manifest?.host_application && !isPluginCompatible(
    plugin.manifest.host_application.min_version,
    plugin.manifest.host_application.max_version,
    maimaiVersion
  )) {
    const min = plugin.manifest.host_application.min_version || '未知'
    const max = plugin.manifest.host_application.max_version
    // Open-ended range when no max_version is declared.
    const range = max ? `${min} - ${max}` : `${min}+`
    return `不兼容当前版本 (需要 ${range},当前 ${maimaiVersion.version})`
  }
  // Compatible: no reason to report.
  return null
}
// 检查是否需要更新(市场版本比已安装版本新)
const needsUpdate = (plugin: PluginInfo): boolean => {
if (!plugin.installed || !plugin.installed_version || !plugin.manifest?.version) {
return false
}
// 不兼容的插件不允许更新
if (!checkPluginCompatibility(plugin)) {
return false
}
const installedVer = plugin.installed_version.trim()
const marketVer = plugin.manifest.version.trim()
@@ -368,7 +403,7 @@ function PluginsPageContent() {
if (maimaiVersion && !checkPluginCompatibility(plugin)) {
toast({
title: '无法安装',
description: '插件与当前麦麦版本不兼容',
description: getIncompatibleReason(plugin) ?? '插件与当前麦麦版本不兼容',
variant: 'destructive',
})
return
@@ -526,6 +561,16 @@ function PluginsPageContent() {
return
}
// 不兼容的插件不允许更新
if (maimaiVersion && !checkPluginCompatibility(plugin)) {
toast({
title: '无法更新',
description: getIncompatibleReason(plugin) ?? '插件与当前麦麦版本不兼容',
variant: 'destructive',
})
return
}
try {
const updateResult = await updatePlugin(
plugin.id,
@@ -833,6 +878,7 @@ function PluginsPageContent() {
checkPluginCompatibility={checkPluginCompatibility}
needsUpdate={needsUpdate}
getStatusBadge={getStatusBadge}
getIncompatibleReason={getIncompatibleReason}
/>
) : activeTab === 'installed' ? (
<InstalledTab
@@ -850,6 +896,7 @@ function PluginsPageContent() {
checkPluginCompatibility={checkPluginCompatibility}
needsUpdate={needsUpdate}
getStatusBadge={getStatusBadge}
getIncompatibleReason={getIncompatibleReason}
/>
) : (
<div className="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-6">

View File

@@ -1,22 +1,4 @@
services:
adapters:
container_name: maim-bot-adapters
#### prod ####
image: unclas/maimbot-adapter:latest
# image: infinitycat/maimbot-adapter:latest
#### dev ####
# image: unclas/maimbot-adapter:dev
# image: infinitycat/maimbot-adapter:dev
environment:
- TZ=Asia/Shanghai
# ports:
# - "8095:8095"
volumes:
- ./docker-config/adapters/config.toml:/adapters/config.toml # 持久化adapters配置文件
- ./data/adapters:/adapters/data # adapters 数据持久化
restart: always
networks:
- maim_bot
core:
container_name: maim-bot-core
#### prod ####
@@ -27,6 +9,8 @@ services:
# image: infinitycat/maibot:dev
environment:
- TZ=Asia/Shanghai
- EULA_AGREE=1b662741904d7155d1ce1c00b3530d0d
- PRIVACY_AGREE=9943b855e72199d0f5016ea39052f1b6
- MAIBOT_LEGACY_0X_UPGRADE_CONFIRMED=1 # Docker 无法交互确认旧版升级迁移,默认跳过确认提示
# - EULA_AGREE=1b662741904d7155d1ce1c00b3530d0d # 同意EULA
# - PRIVACY_AGREE=9943b855e72199d0f5016ea39052f1b6 # 同意隐私协议
@@ -36,9 +20,9 @@ services:
volumes:
# 监听地址和端口已迁移到 ./docker-config/mmc/bot_config.toml 的 maim_message 与 webui 配置段
- ./docker-config/mmc:/MaiMBot/config # 持久化bot配置文件
- ./docker-config/adapters:/MaiMBot/adapters-config # adapter配置文件夹映射
- ./data/MaiMBot/maibot_statistics.html:/MaiMBot/maibot_statistics.html #统计数据输出
- ./data/MaiMBot:/MaiMBot/data # 共享目录
- ./data/MaiMBot/emoji:/data/emoji # 持久化表情包
- ./data/MaiMBot/plugins:/MaiMBot/plugins # 插件目录
- ./data/MaiMBot/logs:/MaiMBot/logs # 日志目录
# - site-packages:/usr/local/lib/python3.13/site-packages # 持久化Python包需要时启用

13
docker-entrypoint.sh Normal file
View File

@@ -0,0 +1,13 @@
#!/bin/sh
# Container entrypoint: seed the bundled Napcat adapter plugin into the
# (possibly volume-mounted) plugins directory on first start, then run the bot.
set -eu

# Template is baked into the image at build time; target lives in the
# plugins directory that users typically mount as a volume.
ADAPTER_TEMPLATE="/MaiMBot/plugin-templates/MaiBot-Napcat-Adapter"
ADAPTER_TARGET="/MaiMBot/plugins/MaiBot-Napcat-Adapter"

mkdir -p /MaiMBot/plugins

# Copy only when the target does not already exist (-e covers files and
# directories), so user modifications persisted in the volume are never
# overwritten on subsequent container starts.
if [ ! -e "$ADAPTER_TARGET" ] && [ -d "$ADAPTER_TEMPLATE" ]; then
cp -a "$ADAPTER_TEMPLATE" "$ADAPTER_TARGET"
fi

# Replace the shell with the bot process so it receives signals directly
# and any extra container arguments are forwarded unchanged.
exec python bot.py "$@"

View File

@@ -19,8 +19,8 @@ dependencies = [
"jieba>=0.42.1",
"json-repair>=0.47.6",
"maim-message>=0.6.2",
"maibot-dashboard==1.0.0.dev2026040439",
"maibot-plugin-sdk>=2.3.0",
"maibot-dashboard==1.0.1.dev2026050251",
"maibot-plugin-sdk>=2.4.0",
"matplotlib>=3.10.5",
"mcp",
"msgpack>=1.1.2",

View File

@@ -1,6 +1,7 @@
from datetime import datetime
from types import SimpleNamespace
import asyncio
import pytest
from src.core.tooling import ToolExecutionResult, ToolInvocation
@@ -8,6 +9,7 @@ from src.llm_models.payload_content.tool_option import ToolCall
from src.maisaka.chat_loop_service import ChatResponse, MaisakaChatLoopService
from src.maisaka.context_messages import AssistantMessage, TIMING_GATE_INVALID_TOOL_HINT_SOURCE
from src.maisaka.reasoning_engine import MaisakaReasoningEngine
from src.maisaka.runtime import MaisakaHeartFlowChatting
def _build_chat_response(tool_calls: list[ToolCall]) -> ChatResponse:
@@ -173,6 +175,29 @@ def test_timing_gate_invalid_tool_hint_only_visible_to_timing_gate() -> None:
assert planner_history == []
def test_forced_timing_trigger_bypasses_message_frequency_threshold() -> None:
    """A forced @/mention trigger must schedule a message turn without consulting the frequency threshold."""
    # Minimal stand-in for the runtime: only the attributes and callables that
    # _schedule_message_turn touches are provided.
    runtime = SimpleNamespace(
        _STATE_WAIT="wait",
        _agent_state="stop",
        _message_turn_scheduled=False,
        _internal_turn_queue=asyncio.Queue(),
        _has_pending_messages=lambda: True,
        _get_pending_message_count=lambda: 1,
        _has_forced_timing_trigger=lambda: True,
        _cancel_deferred_message_turn_task=lambda: None,
    )

    def _fail_get_message_trigger_threshold() -> int:
        # If the threshold is ever queried, the forced trigger was not honored.
        raise AssertionError("@/提及必回不应被普通聊天频率阈值拦住")

    runtime._get_message_trigger_threshold = _fail_get_message_trigger_threshold

    MaisakaHeartFlowChatting._schedule_message_turn(runtime)  # type: ignore[arg-type]

    # The turn was scheduled and a "message" turn was queued.
    assert runtime._message_turn_scheduled is True
    assert runtime._internal_turn_queue.get_nowait() == "message"
def test_finish_tool_is_not_written_back_to_history() -> None:
finish_call = ToolCall(call_id="finish-call", func_name="finish", args={})
reply_call = ToolCall(call_id="reply-call", func_name="reply", args={})
@@ -213,3 +238,47 @@ def test_finish_tool_removes_empty_assistant_history_message() -> None:
)
assert runtime._chat_history == []
def test_timing_gate_head_trim_keeps_short_history() -> None:
    """A history shorter than the drop count must be returned untouched."""
    messages = [
        AssistantMessage(content="第一条消息", timestamp=datetime.now()),
        AssistantMessage(content="第二条消息", timestamp=datetime.now()),
    ]

    trimmed_messages = MaisakaHeartFlowChatting._drop_head_context_messages(
        messages,
        drop_context_count=3,
    )

    assert trimmed_messages == messages
def test_timing_gate_head_trim_keeps_history_within_config_limit() -> None:
    """No trimming happens while the history length is still within the configured threshold."""
    messages = [
        AssistantMessage(content=f"消息 {index}", timestamp=datetime.now())
        for index in range(10)
    ]

    trimmed_messages = MaisakaHeartFlowChatting._drop_head_context_messages(
        messages,
        drop_context_count=7,
        trim_threshold_context_count=10,
    )

    # 10 messages == threshold of 10, so nothing is dropped yet.
    assert trimmed_messages == messages
def test_timing_gate_head_trim_applies_after_config_limit_exceeded() -> None:
    """Once the history exceeds the threshold, the first drop_context_count messages are removed."""
    messages = [
        AssistantMessage(content=f"消息 {index}", timestamp=datetime.now())
        for index in range(11)
    ]

    trimmed_messages = MaisakaHeartFlowChatting._drop_head_context_messages(
        messages,
        drop_context_count=7,
        trim_threshold_context_count=10,
    )

    # 11 > threshold of 10, so the head 7 messages are dropped.
    assert trimmed_messages == messages[7:]

View File

@@ -9,7 +9,7 @@ httpx
jieba>=0.42.1
json-repair>=0.47.6
maim-message>=0.6.2
maibot-plugin-sdk>=1.2.3,<2.0.0
maibot-plugin-sdk>=2.4.0
mcp
msgpack>=1.1.2
numpy>=2.2.6

View File

@@ -223,7 +223,7 @@ def is_mentioned_bot_in_message(message: SessionMessage) -> tuple[bool, bool, fl
break
# 7) 概率设置
if is_at and getattr(global_config.chat, "at_bot_inevitable_reply", 1):
if is_at and getattr(global_config.chat, "inevitable_at_reply", 1):
reply_probability = 1.0
logger.debug("被@回复概率设置为100%")
elif is_mentioned and getattr(global_config.chat, "mentioned_bot_reply", 1):

View File

@@ -57,7 +57,7 @@ MODEL_CONFIG_PATH: Path = (CONFIG_DIR / "model_config.toml").resolve().absolute(
LEGACY_ENV_PATH: Path = (PROJECT_ROOT / ".env").resolve().absolute()
MMC_VERSION: str = "1.0.0"
CONFIG_VERSION: str = "8.9.20"
MODEL_CONFIG_VERSION: str = "1.14.3"
MODEL_CONFIG_VERSION: str = "1.14.5"
logger = get_logger("config")

View File

@@ -11,26 +11,29 @@ DEFAULT_PROVIDER_TEMPLATES: list[dict[str, Any]] = [
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"api_key": "your-api-key",
"auth_type": OpenAICompatibleAuthType.BEARER.value,
"max_retry": 3,
"timeout": 100,
"retry_interval": 8,
}
]
DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
"utils": {
"model_list": ["qwen3.5-35b-a3b-nonthink"],
"model_list": ["deepseek-v4-flash"],
"max_tokens": 4096,
"temperature": 0.5,
"slow_threshold": 15.0,
"selection_strategy": "random",
},
"replyer": {
"model_list": ["ali-glm-5"],
"model_list": ["deepseek-v4-pro-think", "deepseek-v4-pro-nonthink"],
"max_tokens": 4096,
"temperature": 1,
"slow_threshold": 120.0,
"selection_strategy": "random",
},
"planner": {
"model_list": ["qwen3.5-35b-a3b", "qwen3.5-122b-a10b", "qwen3.5-flash"],
"model_list": ["deepseek-v4-flash"],
"max_tokens": 8000,
"temperature": 0.7,
"slow_threshold": 12.0,
@@ -61,40 +64,30 @@ DEFAULT_TASK_CONFIG_TEMPLATES: dict[str, dict[str, Any]] = {
DEFAULT_MODEL_TEMPLATES: list[dict[str, Any]] = [
{
"model_identifier": "glm-5",
"name": "ali-glm-5",
"model_identifier": "deepseek-v4-pro",
"name": "deepseek-v4-pro-think",
"api_provider": "BaiLian",
"price_in": 3.0,
"price_out": 14.0,
"temperature": 1.0,
"price_in": 12.0,
"price_out": 24.0,
"visual": False,
"extra_params": {"enable_thinking": False},
"extra_params": {"enable_thinking": "True"},
},
{
"model_identifier": "qwen3.5-122b-a10b",
"name": "qwen3.5-122b-a10b",
"model_identifier": "deepseek-v4-pro",
"name": "deepseek-v4-pro-nonthink",
"api_provider": "BaiLian",
"price_in": 0.8,
"price_out": 6.4,
"visual": True,
"price_in": 12.0,
"price_out": 24.0,
"visual": False,
"extra_params": {"enable_thinking": "false"},
},
{
"model_identifier": "qwen3.5-35b-a3b",
"name": "qwen3.5-35b-a3b",
"model_identifier": "deepseek-v4-flash",
"name": "deepseek-v4-flash",
"api_provider": "BaiLian",
"price_in": 0.4,
"price_out": 3.2,
"visual": True,
"extra_params": {},
},
{
"model_identifier": "qwen3.5-35b-a3b",
"name": "qwen3.5-35b-a3b-nonthink",
"api_provider": "BaiLian",
"price_in": 0.4,
"price_out": 3.2,
"visual": True,
"price_in": 1.0,
"price_out": 2.0,
"visual": False,
"extra_params": {"enable_thinking": "false"},
},
{

View File

@@ -172,7 +172,7 @@ class APIProvider(ConfigBase):
"""工具参数解析模式。可选值:`auto`、`strict`、`repair`、`double_decode`。"""
max_retry: int = Field(
default=2,
default=3,
ge=0,
json_schema_extra={
"x-widget": "input",
@@ -182,7 +182,7 @@ class APIProvider(ConfigBase):
"""最大重试次数 (单个模型API调用失败, 最多重试的次数)"""
timeout: int = Field(
default=10,
default=60,
ge=1,
json_schema_extra={
"x-widget": "input",
@@ -193,7 +193,7 @@ class APIProvider(ConfigBase):
"""API调用的超时时长 (超过这个时长, 本次请求将被视为"请求超时", 单位: 秒)"""
retry_interval: int = Field(
default=10,
default=5,
ge=1,
json_schema_extra={
"x-widget": "input",

View File

@@ -4,8 +4,6 @@ from collections.abc import Awaitable, Callable, Sequence
from dataclasses import dataclass, field
from typing import Any, Optional, TYPE_CHECKING
import random
from src.chat.message_receive.chat_manager import chat_manager
from src.cli.maisaka_cli_sender import CLI_PLATFORM_NAME, render_cli_message
from src.common.data_models.image_data_model import MaiEmoji
@@ -121,45 +119,13 @@ def _normalize_emotions(emoji: MaiEmoji) -> list[str]:
return []
async def select_emoji_for_maisaka(
*,
requested_emotion: str = "",
reasoning: str = "",
context_texts: Sequence[str] | None = None,
sample_size: int = 30,
) -> tuple[MaiEmoji | None, str]:
"""为 Maisaka 选择一个合适的表情。"""
del reasoning, context_texts
available_emojis = list(emoji_manager.emojis)
if not available_emojis:
return None, ""
normalized_requested_emotion = requested_emotion.strip()
if normalized_requested_emotion:
matched_emojis = [
emoji
for emoji in available_emojis
if normalized_requested_emotion.lower() in (emotion.lower() for emotion in _normalize_emotions(emoji))
]
if matched_emojis:
return random.choice(matched_emojis), normalized_requested_emotion
sampled_emojis = random.sample(
available_emojis,
min(max(sample_size, 1), len(available_emojis)),
)
return random.choice(sampled_emojis), ""
async def send_emoji_for_maisaka(
*,
stream_id: str,
emoji_selector: EmojiSelector,
requested_emotion: str = "",
reasoning: str = "",
context_texts: Sequence[str] | None = None,
emoji_selector: EmojiSelector | None = None,
) -> MaisakaEmojiSendResult:
"""为 Maisaka 选择并发送一个表情。"""
@@ -194,20 +160,12 @@ async def send_emoji_for_maisaka(
normalized_context_texts = _normalize_context_texts(before_select_kwargs.get("context_texts"))
sample_size = _coerce_positive_int(before_select_kwargs.get("sample_size"), sample_size)
if emoji_selector is None:
selected_emoji, matched_emotion = await select_emoji_for_maisaka(
requested_emotion=normalized_requested_emotion,
reasoning=normalized_reasoning,
context_texts=normalized_context_texts,
sample_size=sample_size,
)
else:
selected_emoji, matched_emotion = await emoji_selector(
normalized_requested_emotion,
normalized_reasoning,
normalized_context_texts,
sample_size,
)
selected_emoji, matched_emotion = await emoji_selector(
normalized_requested_emotion,
normalized_reasoning,
normalized_context_texts,
sample_size,
)
after_select_result = await _get_runtime_manager().invoke_hook(
"emoji.maisaka.after_select",
stream_id=stream_id,

View File

@@ -2,6 +2,7 @@
from datetime import datetime
from io import BytesIO
from json import dumps
from random import sample
from typing import Any, Dict, Optional
@@ -17,9 +18,8 @@ from src.emoji_system.maisaka_tool import send_emoji_for_maisaka
from src.common.data_models.image_data_model import MaiEmoji
from src.common.data_models.message_component_data_model import ImageComponent, MessageSequence, TextComponent
from src.common.logger import get_logger
from src.config.config import global_config
from src.config.config import config_manager, global_config
from src.core.tooling import ToolExecutionContext, ToolExecutionResult, ToolInvocation, ToolSpec
from src.llm_models.payload_content.resp_format import RespFormat, RespFormatType
from src.llm_models.payload_content.message import MessageBuilder, RoleType
from src.maisaka.context_messages import (
LLMContextMessage,
@@ -221,6 +221,7 @@ def _build_send_emoji_monitor_detail(
detail: Dict[str, Any] = {}
if isinstance(request_messages, list) and request_messages:
detail["request_messages"] = request_messages
detail["prompt_text"] = dumps(request_messages, ensure_ascii=False, indent=2)
if reasoning_text.strip():
detail["reasoning_text"] = reasoning_text.strip()
if output_text.strip():
@@ -279,6 +280,24 @@ def _build_send_emoji_monitor_metadata(
return {}
def _resolve_emoji_selector_model_task_name() -> str:
    """Pick the model task name for the emoji-selection sub-agent.

    Returns ``"planner"`` only when every non-blank model configured for the
    planner task exists in the model registry and declares visual capability;
    otherwise falls back to the dedicated ``"vlm"`` task.
    """
    model_config = config_manager.get_model_config()
    # Planner task model names, ignoring blank entries.
    planner_models = [
        model_name
        for model_name in model_config.model_task_config.planner.model_list
        if str(model_name).strip()
    ]
    models_by_name = {model.name: model for model in model_config.models}
    # All planner models must be known and vision-capable to reuse "planner".
    if planner_models and all(
        model_name in models_by_name and models_by_name[model_name].visual
        for model_name in planner_models
    ):
        return "planner"
    return "vlm"
async def _select_emoji_with_sub_agent(
tool_ctx: BuiltinToolRuntimeContext,
reasoning: str,
@@ -326,7 +345,8 @@ async def _select_emoji_with_sub_agent(
prompt_llm_message = prompt_message.to_llm_message()
if prompt_llm_message is not None:
request_messages.append(prompt_llm_message)
candidate_llm_message = candidate_message.to_llm_message()
candidate_to_llm_message = getattr(candidate_message, "to_llm_message", None)
candidate_llm_message = candidate_to_llm_message() if callable(candidate_to_llm_message) else None
if candidate_llm_message is not None:
request_messages.append(candidate_llm_message)
serialized_request_messages = serialize_prompt_messages(request_messages)
@@ -337,10 +357,7 @@ async def _select_emoji_with_sub_agent(
system_prompt=system_prompt,
extra_messages=[prompt_message, candidate_message],
max_tokens=_EMOJI_SUB_AGENT_MAX_TOKENS,
response_format=RespFormat(
format_type=RespFormatType.JSON_SCHEMA,
schema=EmojiSelectionResult,
),
model_task_name=_resolve_emoji_selector_model_task_name(),
)
selection_duration_ms = round((datetime.now() - selection_started_at).total_seconds() * 1000, 2)
@@ -409,12 +426,16 @@ async def handle_tool(
"reason": "",
}
selection_metadata: Dict[str, Any] = {"reason": "", "monitor_detail": {}}
requested_emotion = ""
if isinstance(invocation.arguments, dict):
requested_emotion = str(invocation.arguments.get("emotion") or "").strip()
logger.info(f"{tool_ctx.runtime.log_prefix} 触发表情包发送工具")
try:
send_result = await send_emoji_for_maisaka(
stream_id=tool_ctx.runtime.session_id,
requested_emotion=requested_emotion,
reasoning=tool_ctx.engine.last_reasoning_content,
context_texts=context_texts,
emoji_selector=lambda _requested_emotion, reasoning, context_texts, sample_size: _select_emoji_with_sub_agent(

View File

@@ -194,6 +194,7 @@ class MaisakaChatLoopService:
session_id: Optional[str] = None,
is_group_chat: Optional[bool] = None,
max_tokens: int = 2048,
model_task_name: str = "planner",
) -> None:
"""初始化 Maisaka 对话循环服务。
@@ -205,6 +206,7 @@ class MaisakaChatLoopService:
"""
self._max_tokens = max_tokens
self._model_task_name = model_task_name.strip() or "planner"
self._is_group_chat = is_group_chat
self._session_id = session_id or ""
self._extra_tools: List[ToolOption] = []
@@ -236,17 +238,18 @@ class MaisakaChatLoopService:
)
def _get_llm_chat_client(self, request_kind: str) -> LLMServiceClient:
"""获取当前请求类型对应的 planner LLM 客户端。"""
"""获取当前请求类型对应的 LLM 客户端。"""
request_type = self._resolve_llm_request_type(request_kind)
llm_client = self._llm_chat_clients.get(request_type)
client_key = f"{self._model_task_name}:{request_type}"
llm_client = self._llm_chat_clients.get(client_key)
if llm_client is None:
llm_client = LLMServiceClient(
task_name="planner",
task_name=self._model_task_name,
request_type=request_type,
session_id=self._session_id,
)
self._llm_chat_clients[request_type] = llm_client
self._llm_chat_clients[client_key] = llm_client
return llm_client
@staticmethod

View File

@@ -473,13 +473,18 @@ class MaisakaHeartFlowChatting:
def _update_message_trigger_state(self, message: SessionMessage) -> None:
"""补齐消息中的 @/提及 标记,并在命中时启用强制 continue。"""
detected_mentioned, detected_at, _ = is_mentioned_bot_in_message(message)
detected_mentioned, detected_at, reply_probability_boost = is_mentioned_bot_in_message(message)
if detected_at:
message.is_at = True
if detected_mentioned:
message.is_mentioned = True
if not message.is_at and not message.is_mentioned:
should_force_reply = (
reply_probability_boost >= 1.0
or (message.is_at and global_config.chat.inevitable_at_reply)
or (message.is_mentioned and global_config.chat.mentioned_bot_reply)
)
if not should_force_reply or (not message.is_at and not message.is_mentioned):
return
self._arm_force_next_timing_continue(
@@ -537,6 +542,11 @@ class MaisakaHeartFlowChatting:
self._force_next_timing_reason = ""
return reason
def _has_forced_timing_trigger(self) -> bool:
"""判断是否已有 @/提及必回触发,需绕过普通频率阈值。"""
return self._force_next_timing_continue
def _bind_planner_interrupt_flag(self, interrupt_flag: asyncio.Event) -> None:
"""绑定当前可打断请求使用的中断标记。"""
self._planner_interrupt_flag = interrupt_flag
@@ -590,6 +600,7 @@ class MaisakaHeartFlowChatting:
extra_messages: Optional[Sequence[LLMContextMessage]] = None,
interrupt_flag: asyncio.Event | None = None,
max_tokens: int = 512,
model_task_name: str = "planner",
response_format: RespFormat | None = None,
tool_definitions: Optional[Sequence[ToolDefinitionInput]] = None,
) -> ChatResponse:
@@ -603,6 +614,7 @@ class MaisakaHeartFlowChatting:
sub_agent_history = self._drop_head_context_messages(
selected_history,
drop_head_context_count,
trim_threshold_context_count=context_message_limit,
)
if extra_messages:
sub_agent_history.extend(list(extra_messages))
@@ -612,6 +624,7 @@ class MaisakaHeartFlowChatting:
session_id=self.session_id,
is_group_chat=self.chat_stream.is_group_session,
max_tokens=max_tokens,
model_task_name=model_task_name,
)
sub_agent.set_interrupt_flag(interrupt_flag)
return await sub_agent.chat_loop_step(
@@ -625,12 +638,21 @@ class MaisakaHeartFlowChatting:
def _drop_head_context_messages(
chat_history: Sequence[LLMContextMessage],
drop_context_count: int,
*,
trim_threshold_context_count: int | None = None,
) -> list[LLMContextMessage]:
"""从已选上下文头部丢弃指定数量的普通上下文消息。"""
if drop_context_count <= 0:
return list(chat_history)
context_message_count = sum(1 for message in chat_history if message.count_in_context)
if trim_threshold_context_count is not None and context_message_count <= trim_threshold_context_count:
return list(chat_history)
if context_message_count <= drop_context_count:
return list(chat_history)
first_kept_index = 0
dropped_context_count = 0
while (
@@ -867,6 +889,12 @@ class MaisakaHeartFlowChatting:
if pending_count <= 0:
return
if self._has_forced_timing_trigger():
self._cancel_deferred_message_turn_task()
self._message_turn_scheduled = True
self._internal_turn_queue.put_nowait("message")
return
trigger_threshold = self._get_message_trigger_threshold()
if pending_count >= trigger_threshold or self._should_trigger_message_turn_by_idle_compensation(
pending_count=pending_count,

View File

@@ -146,6 +146,11 @@ async def _fetch_models_from_provider(
client_config = build_openai_compatible_client_config(provider)
headers.update(client_config.default_headers)
params.update(client_config.default_query)
# build_openai_compatible_client_config 在“默认 Bearer”场景下
# 会把 api_key 留在 client_config.api_key 中交给 OpenAI SDK 自行注入 Authorization 头,
# 而不会写入 default_headers。这里我们用 httpx 直接发请求,需要手动补上鉴权头/参数。
if client_config.api_key and "Authorization" not in headers:
headers["Authorization"] = f"Bearer {client_config.api_key}"
try:
async with httpx.AsyncClient(timeout=30.0) as client: