后端: 1.收口阶段 6 agent 结构迁移,将 newAgent 内核与 agentsvc 编排层迁入 services/agent - 切换 Agent 启动装配与 HTTP handler 直连 agentsvc,移除旧 service agent bridge - 补齐 Agent 对 memory、task、task-class、schedule 的 RPC 适配与契约字段 - 扩展 schedule、task、task-class RPC/contract 支撑 Agent 查询、写入与 provider 切流 - 更新迁移文档、README 与相关注释,明确 agent 当前切流点和剩余 memory 迁移面
113 lines
3.3 KiB
Go
113 lines
3.3 KiB
Go
package sv
|
||
|
||
import (
|
||
"context"
|
||
"errors"
|
||
"log"
|
||
"strings"
|
||
|
||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||
)
|
||
|
||
const reasoningSummaryMaxTokens = 700
|
||
|
||
type reasoningSummaryLLMResponse struct {
|
||
ShortSummary string `json:"short_summary"`
|
||
DetailSummary string `json:"detail_summary"`
|
||
}
|
||
|
||
// makeReasoningSummaryFunc 把便宜模型封装成 stream 层可注入的摘要函数。
|
||
//
|
||
// 职责边界:
|
||
// 1. service 层负责选择模型与 prompt,stream 层只负责调度和闸门;
|
||
// 2. 这里不持久化摘要,持久化统一走 ChunkEmitter 的 extra hook;
|
||
// 3. 摘要失败时返回 error,由 ReasoningDigestor 吞掉并等待下一次水位线/Flush 兜底。
|
||
func (s *AgentService) makeReasoningSummaryFunc(client *llmservice.Client) agentstream.ReasoningSummaryFunc {
|
||
if client == nil {
|
||
return nil
|
||
}
|
||
|
||
return func(ctx context.Context, input agentstream.ReasoningSummaryInput) (agentstream.StreamThinkingSummaryExtra, error) {
|
||
previousSummary := ""
|
||
if input.PreviousSummary != nil {
|
||
previousSummary = input.PreviousSummary.DetailSummary
|
||
if strings.TrimSpace(previousSummary) == "" {
|
||
previousSummary = input.PreviousSummary.ShortSummary
|
||
}
|
||
}
|
||
|
||
messages := agentprompt.BuildReasoningSummaryMessages(agentprompt.ReasoningSummaryPromptInput{
|
||
FullReasoning: input.FullReasoning,
|
||
DeltaReasoning: input.DeltaReasoning,
|
||
PreviousSummary: previousSummary,
|
||
CandidateSeq: input.CandidateSeq,
|
||
Final: input.Final,
|
||
DurationSeconds: input.DurationSeconds,
|
||
})
|
||
|
||
resp, rawResult, err := llmservice.GenerateJSON[reasoningSummaryLLMResponse](
|
||
ctx,
|
||
client,
|
||
messages,
|
||
llmservice.GenerateOptions{
|
||
Temperature: 0.1,
|
||
MaxTokens: reasoningSummaryMaxTokens,
|
||
Thinking: llmservice.ThinkingModeDisabled,
|
||
Metadata: map[string]any{
|
||
"stage": "reasoning_summary",
|
||
"candidate_seq": input.CandidateSeq,
|
||
"final": input.Final,
|
||
},
|
||
},
|
||
)
|
||
if err != nil {
|
||
log.Printf("[WARN] reasoning 摘要模型调用失败 seq=%d final=%v err=%v raw=%s",
|
||
input.CandidateSeq,
|
||
input.Final,
|
||
err,
|
||
truncateReasoningSummaryRaw(rawResult),
|
||
)
|
||
return agentstream.StreamThinkingSummaryExtra{}, err
|
||
}
|
||
|
||
summary := agentstream.StreamThinkingSummaryExtra{
|
||
ShortSummary: strings.TrimSpace(resp.ShortSummary),
|
||
DetailSummary: limitReasoningDetailSummary(
|
||
resp.DetailSummary,
|
||
agentprompt.ReasoningSummaryDetailRuneLimit(input.FullReasoning, input.DeltaReasoning),
|
||
),
|
||
}
|
||
if summary.ShortSummary == "" && summary.DetailSummary == "" {
|
||
return agentstream.StreamThinkingSummaryExtra{}, errors.New("reasoning 摘要模型返回空摘要")
|
||
}
|
||
return summary, nil
|
||
}
|
||
}
|
||
|
||
func limitReasoningDetailSummary(text string, maxRunes int) string {
|
||
text = strings.TrimSpace(text)
|
||
if text == "" || maxRunes <= 0 {
|
||
return text
|
||
}
|
||
|
||
runes := []rune(text)
|
||
if len(runes) <= maxRunes {
|
||
return text
|
||
}
|
||
return string(runes[:maxRunes])
|
||
}
|
||
|
||
func truncateReasoningSummaryRaw(raw *llmservice.TextResult) string {
|
||
if raw == nil {
|
||
return ""
|
||
}
|
||
text := strings.TrimSpace(raw.Text)
|
||
runes := []rune(text)
|
||
if len(runes) <= 200 {
|
||
return text
|
||
}
|
||
return string(runes[:200]) + "..."
|
||
}
|