Version: 0.8.4.dev.260329
后端: 1.新建newAgent文件夹,是的你没听错,刚刚搬迁完的旧结构又准备推翻了:因为通用性太差,用户需求复杂一点就招架不了。最新的架构已经在路上,这应该是这个项目的正确路线了,目前正在搭骨架。 前端: 无改动 全仓库: 无改动
This commit is contained in:
115
backend/newAgent/stream/emitter.go
Normal file
115
backend/newAgent/stream/emitter.go
Normal file
@@ -0,0 +1,115 @@
|
||||
package newagentstream
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// PayloadEmitter is the minimal interface that actually writes chunks into the
// outer SSE pipeline.
//
// Notes:
// 1. Deliberately not bound to a chan/string implementation;
// 2. The caller can pass either a "write to channel" function or a
//    "write to gin stream" function;
// 3. Anything matching the signature `func(string) error` plugs in.
type PayloadEmitter func(payload string) error
|
||||
|
||||
// StageEmitter is the minimal interface graph/node code uses to push
// "current stage" notifications.
type StageEmitter func(stage, detail string)
|
||||
|
||||
// NoopPayloadEmitter 返回一个空实现,便于骨架期安全占位。
|
||||
func NoopPayloadEmitter() PayloadEmitter {
|
||||
return func(string) error { return nil }
|
||||
}
|
||||
|
||||
// NoopStageEmitter 返回一个空实现,避免 graph 在没有接前端时处处判空。
|
||||
func NoopStageEmitter() StageEmitter {
|
||||
return func(stage, detail string) {}
|
||||
}
|
||||
|
||||
// WrapStageEmitter 把可空函数包装成稳定的 StageEmitter。
|
||||
func WrapStageEmitter(fn func(stage, detail string)) StageEmitter {
|
||||
if fn == nil {
|
||||
return NoopStageEmitter()
|
||||
}
|
||||
return fn
|
||||
}
|
||||
|
||||
// EmitStageAsReasoning 把“阶段提示”伪装成 reasoning chunk 推给前端。
|
||||
//
|
||||
// 设计背景:
|
||||
// 1. 你当前 Apifox 只认思考块和正文块,因此阶段提示需要先借 reasoning_content 走通;
|
||||
// 2. 这样后续真正前端上线时,只需要在这一层换协议,而不必回到各 skill 重改 graph;
|
||||
// 3. 这里不拼花哨格式,只给出稳定、可读、可 grep 的文本。
|
||||
func EmitStageAsReasoning(emit PayloadEmitter, requestID, modelName string, created int64, stage, detail string, includeRole bool) error {
|
||||
if emit == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
text := BuildStageReasoningText(stage, detail)
|
||||
payload, err := ToOpenAIReasoningChunk(requestID, modelName, created, text, includeRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload == "" {
|
||||
return nil
|
||||
}
|
||||
return emit(payload)
|
||||
}
|
||||
|
||||
// EmitAssistantReply 把一段完整正文作为 assistant chunk 推出。
|
||||
//
|
||||
// 注意:
|
||||
// 1. 这里是“整段发”,不是把文本强行拆碎;
|
||||
// 2. 这样后续如果某条链路不需要真流式,也可以复用统一出口;
|
||||
// 3. 真正按 token/chunk 细粒度流式输出,应由 llm.Stream + 上层循环处理。
|
||||
func EmitAssistantReply(emit PayloadEmitter, requestID, modelName string, created int64, content string, includeRole bool) error {
|
||||
if emit == nil {
|
||||
return nil
|
||||
}
|
||||
payload, err := ToOpenAIAssistantChunk(requestID, modelName, created, content, includeRole)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload == "" {
|
||||
return nil
|
||||
}
|
||||
return emit(payload)
|
||||
}
|
||||
|
||||
// EmitFinish 统一输出 stop 结束块。
|
||||
func EmitFinish(emit PayloadEmitter, requestID, modelName string, created int64) error {
|
||||
if emit == nil {
|
||||
return nil
|
||||
}
|
||||
payload, err := ToOpenAIFinishStream(requestID, modelName, created)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if payload == "" {
|
||||
return nil
|
||||
}
|
||||
return emit(payload)
|
||||
}
|
||||
|
||||
// EmitDone 统一输出 OpenAI 兼容流式结束标记。
|
||||
func EmitDone(emit PayloadEmitter) error {
|
||||
if emit == nil {
|
||||
return nil
|
||||
}
|
||||
return emit("[DONE]")
|
||||
}
|
||||
|
||||
// BuildStageReasoningText renders the unified stage-hint text.
//
// Both inputs are trimmed first; an empty stage falls back to the bare detail,
// and an empty detail yields the stage line alone.
func BuildStageReasoningText(stage, detail string) string {
	s, d := strings.TrimSpace(stage), strings.TrimSpace(detail)
	if s == "" {
		return d
	}
	if d == "" {
		return fmt.Sprintf("阶段:%s", s)
	}
	return fmt.Sprintf("阶段:%s\n%s", s, d)
}
|
||||
102
backend/newAgent/stream/openai.go
Normal file
102
backend/newAgent/stream/openai.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package newagentstream
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/cloudwego/eino/schema"
|
||||
)
|
||||
|
||||
// OpenAIChunkResponse is the OpenAI-compatible streaming chunk DTO.
//
// Why it lives in Agent/stream:
// 1. Whether quicknote, taskquery or schedule — anything that needs SSE will
//    reuse this protocol shell;
// 2. node/graph layers only decide "what to push" and never assemble JSON
//    themselves;
// 3. If the frontend protocol is upgraded later, the change is centralized here.
type OpenAIChunkResponse struct {
	ID      string              `json:"id"`
	Object  string              `json:"object"`
	Created int64               `json:"created"`
	Model   string              `json:"model"`
	Choices []OpenAIChunkChoice `json:"choices"`
}
|
||||
|
||||
// OpenAIChunkChoice mirrors OpenAI's choices[0] element.
type OpenAIChunkChoice struct {
	Index        int              `json:"index"`
	Delta        OpenAIChunkDelta `json:"delta"`
	FinishReason *string          `json:"finish_reason"`
}
|
||||
|
||||
// OpenAIChunkDelta is where role/content/reasoning actually travel.
type OpenAIChunkDelta struct {
	Role             string `json:"role,omitempty"`
	Content          string `json:"content,omitempty"`
	ReasoningContent string `json:"reasoning_content,omitempty"`
}
|
||||
|
||||
// ToOpenAIStream 把 Eino message 转成 OpenAI 兼容 chunk。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责把 chunk.Content / chunk.ReasoningContent 映射到协议字段;
|
||||
// 2. 负责按 includeRole 决定是否在首块带上 assistant 角色;
|
||||
// 3. 不负责发送,也不负责决定“这个 chunk 该不该推”。
|
||||
func ToOpenAIStream(chunk *schema.Message, requestID, modelName string, created int64, includeRole bool) (string, error) {
|
||||
delta := OpenAIChunkDelta{}
|
||||
if includeRole {
|
||||
delta.Role = "assistant"
|
||||
}
|
||||
if chunk != nil {
|
||||
delta.Content = chunk.Content
|
||||
delta.ReasoningContent = chunk.ReasoningContent
|
||||
}
|
||||
return buildOpenAIChunkPayload(requestID, modelName, created, delta, nil)
|
||||
}
|
||||
|
||||
// ToOpenAIReasoningChunk 直接构造一个 reasoning chunk。
|
||||
func ToOpenAIReasoningChunk(requestID, modelName string, created int64, reasoning string, includeRole bool) (string, error) {
|
||||
delta := OpenAIChunkDelta{ReasoningContent: reasoning}
|
||||
if includeRole {
|
||||
delta.Role = "assistant"
|
||||
}
|
||||
return buildOpenAIChunkPayload(requestID, modelName, created, delta, nil)
|
||||
}
|
||||
|
||||
// ToOpenAIAssistantChunk 直接构造一个正文 chunk。
|
||||
func ToOpenAIAssistantChunk(requestID, modelName string, created int64, content string, includeRole bool) (string, error) {
|
||||
delta := OpenAIChunkDelta{Content: content}
|
||||
if includeRole {
|
||||
delta.Role = "assistant"
|
||||
}
|
||||
return buildOpenAIChunkPayload(requestID, modelName, created, delta, nil)
|
||||
}
|
||||
|
||||
// ToOpenAIFinishStream 生成流式结束 chunk(finish_reason=stop)。
|
||||
func ToOpenAIFinishStream(requestID, modelName string, created int64) (string, error) {
|
||||
stop := "stop"
|
||||
return buildOpenAIChunkPayload(requestID, modelName, created, OpenAIChunkDelta{}, &stop)
|
||||
}
|
||||
|
||||
func buildOpenAIChunkPayload(requestID, modelName string, created int64, delta OpenAIChunkDelta, finishReason *string) (string, error) {
|
||||
// 1. 若既没有 role,也没有正文/思考,也没有 finish_reason,则视为“空块”,直接跳过。
|
||||
// 2. 这样可以避免上层每次都自己写一遍空块判断。
|
||||
if delta.Role == "" && delta.Content == "" && delta.ReasoningContent == "" && finishReason == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
dto := OpenAIChunkResponse{
|
||||
ID: requestID,
|
||||
Object: "chat.completion.chunk",
|
||||
Created: created,
|
||||
Model: modelName,
|
||||
Choices: []OpenAIChunkChoice{{
|
||||
Index: 0,
|
||||
Delta: delta,
|
||||
FinishReason: finishReason,
|
||||
}},
|
||||
}
|
||||
data, err := json.Marshal(dto)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(data), nil
|
||||
}
|
||||
Reference in New Issue
Block a user