Version: 0.9.77.dev.260505

后端:
1.阶段 6 CP4/CP5 目录收口与共享边界纯化
- 将 backend 根目录收口为 services、client、gateway、cmd、shared 五个一级目录
- 收拢 bootstrap、inits、infra/kafka、infra/outbox、conv、respond、pkg、middleware,移除根目录旧实现与空目录
- 将 utils 下沉到 services/userauth/internal/auth,将 logic 下沉到 services/schedule/core/planning
- 将迁移期 runtime 桥接实现统一收拢到 services/runtime/{conv,dao,eventsvc,model},删除 shared/legacy 与未再被 import 的旧 service 实现
- 将 gateway/shared/respond 收口为 HTTP/Gin 错误写回适配,shared/respond 仅保留共享错误语义与状态映射
- 将 HTTP IdempotencyMiddleware 与 RateLimitMiddleware 收口到 gateway/middleware
- 将 GormCachePlugin 下沉到 shared/infra/gormcache,将共享 RateLimiter 下沉到 shared/infra/ratelimit,将 agent token budget 下沉到 services/agent/shared
- 删除 InitEino 兼容壳,收缩 cmd/internal/coreinit 仅保留旧组合壳残留域初始化语义
- 更新微服务迁移计划与桌面 checklist,补齐 CP4/CP5 当前切流点、目录终态与验证结果
- 完成 go test ./...、git diff --check 与最终真实 smoke;health、register/login、task/create+get、schedule/today、task-class/list、memory/items、agent chat/meta/timeline/context-stats 全部 200,SSE 合并结果为 CP5_OK 且 [DONE] 只有 1 个
This commit is contained in:
Losita
2026-05-05 23:25:07 +08:00
parent 2a96f4c6f9
commit 3b6fca44a6
226 changed files with 731 additions and 3497 deletions

View File

@@ -0,0 +1,52 @@
package conv
import (
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/cloudwego/eino/schema"
)
// ToEinoMessages 将数据库模型转换为 Eino 模型
// ToEinoMessages converts persisted chat-history rows into Eino schema
// messages.
//
// Role mapping: "user" -> schema.User, "assistant" -> schema.Assistant,
// anything else (including a NULL role column) -> schema.System.
func ToEinoMessages(dbMsgs []model.ChatHistory) []*schema.Message {
	// Pre-size: exactly one output message per input row.
	res := make([]*schema.Message, 0, len(dbMsgs))
	for _, m := range dbMsgs {
		var role schema.RoleType
		switch safeChatHistoryRole(m.Role) {
		case "user":
			role = schema.User
		case "assistant":
			role = schema.Assistant
		default:
			role = schema.System
		}
		msg := &schema.Message{
			Role:             role,
			Content:          safeChatHistoryText(m.MessageContent),
			ReasoningContent: safeChatHistoryText(m.ReasoningContent),
		}
		// The retry mechanism has been removed entirely: historical retry_*
		// columns are no longer propagated into the runtime context.
		extra := map[string]any{"history_id": m.ID}
		if m.ReasoningDurationSeconds > 0 {
			extra["reasoning_duration_seconds"] = m.ReasoningDurationSeconds
		}
		// extra always carries history_id, so it is never empty; attach it
		// unconditionally (the old len(extra) > 0 guard was always true).
		msg.Extra = extra
		res = append(res, msg)
	}
	return res
}
// safeChatHistoryRole dereferences a nullable role column, mapping NULL ("" for
// callers) to the empty string.
func safeChatHistoryRole(role *string) string {
	if role != nil {
		return *role
	}
	return ""
}
// safeChatHistoryText dereferences a nullable text column, mapping NULL to "".
func safeChatHistoryText(text *string) string {
	if text != nil {
		return *text
	}
	return ""
}

View File

@@ -0,0 +1,481 @@
package conv
import (
"fmt"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
)
import "sort"
// SchedulesToScheduleConflictDetail groups per-section schedule rows into
// conflict-detail DTOs: rows of the same event on the same (week, day) merge
// into one detail whose StartSection/EndSection span the sorted sections.
func SchedulesToScheduleConflictDetail(schedules []model.Schedule) []model.ScheduleConflictDetail {
	if len(schedules) == 0 {
		return []model.ScheduleConflictDetail{}
	}
	// 1. Group logically related rows.
	// Key format: EventID-Week-Day, so the same event on different days is
	// never merged into one conflict detail.
	groups := make(map[string]*model.ScheduleConflictDetail)
	for _, s := range schedules {
		key := fmt.Sprintf("%d-%d-%d", s.EventID, s.Week, s.DayOfWeek)
		if _, ok := groups[key]; !ok {
			// Location is a nullable column (*string); guard the dereference
			// so a row stored without a location cannot panic.
			location := ""
			if s.Event.Location != nil {
				location = *s.Event.Location
			}
			groups[key] = &model.ScheduleConflictDetail{
				EventID:   s.EventID,
				Name:      s.Event.Name,
				Location:  location,
				Type:      s.Event.Type,
				Week:      s.Week,
				DayOfWeek: s.DayOfWeek,
			}
		}
		// Accumulate this row's section into the group.
		groups[key].Sections = append(groups[key].Sections, s.Section)
	}
	// 2. Derive the section interval for each group.
	res := make([]model.ScheduleConflictDetail, 0, len(groups))
	for _, detail := range groups {
		// Sort sections, e.g. [3, 1, 2] -> [1, 2, 3].
		sort.Ints(detail.Sections)
		// Minimum is the start, maximum is the end.
		detail.StartSection = detail.Sections[0]
		detail.EndSection = detail.Sections[len(detail.Sections)-1]
		res = append(res, *detail)
	}
	// 3. Order the result chronologically so the frontend receives a sorted DTO.
	sort.Slice(res, func(i, j int) bool {
		if res[i].Week != res[j].Week {
			return res[i].Week < res[j].Week
		}
		if res[i].DayOfWeek != res[j].DayOfWeek {
			return res[i].DayOfWeek < res[j].DayOfWeek
		}
		return res[i].StartSection < res[j].StartSection
	})
	return res
}
// sectionTimeMap maps an atomic class section number (1..12) to its
// [start, end] wall-clock times ("HH:MM").
// The values below follow the CQUPT (Chongqing Univ. of Posts & Telecom) timetable.
var sectionTimeMap = map[int][2]string{
	1: {"08:00", "08:45"}, 2: {"08:55", "09:40"},
	3: {"10:15", "11:00"}, 4: {"11:10", "11:55"},
	5: {"14:00", "14:45"}, 6: {"14:55", "15:40"},
	7: {"16:15", "17:00"}, 8: {"17:10", "17:55"},
	9: {"19:00", "19:45"}, 10: {"19:55", "20:40"},
	11: {"20:50", "21:35"}, 12: {"21:45", "22:30"},
}
// SchedulesToUserTodaySchedule groups schedule rows by (week, day) and builds
// per-day timelines: consecutive sections of the same event merge into one
// EventBrief, and class-free gaps are emitted as "empty" blocks merged along
// the standard double-section grid (1-2, 3-4, ...).
func SchedulesToUserTodaySchedule(schedules []model.Schedule) []model.UserTodaySchedule {
	if len(schedules) == 0 {
		return []model.UserTodaySchedule{}
	}
	// 1. Bucket rows by "week-day".
	dayGroups := make(map[string][]model.Schedule)
	for _, s := range schedules {
		dayKey := fmt.Sprintf("%d-%d", s.Week, s.DayOfWeek)
		dayGroups[dayKey] = append(dayGroups[dayKey], s)
	}
	var result []model.UserTodaySchedule
	for _, daySchedules := range dayGroups {
		todayDTO := model.UserTodaySchedule{
			Week:      daySchedules[0].Week,
			DayOfWeek: daySchedules[0].DayOfWeek,
			Events:    []model.EventBrief{},
		}
		// Section lookup table: O(1) access to whatever occupies a section.
		sectionMap := make(map[int]model.Schedule)
		for _, s := range daySchedules {
			sectionMap[s.Section] = s
		}
		order := 1
		// Linear sweep over sections 1..12.
		for curr := 1; curr <= 12; {
			if slot, ok := sectionMap[curr]; ok {
				// === Scenario A: this section is occupied ===
				// Probe forward while the same event continues contiguously
				// (e.g. sections 9-12 taught back to back).
				end := curr
				for next := curr + 1; next <= 12; next++ {
					if nextSlot, exist := sectionMap[next]; exist && nextSlot.EventID == slot.EventID {
						end = next
					} else {
						break
					}
				}
				// Location is a nullable column; guard the dereference
				// (fixes a panic on events stored without a location).
				location := ""
				if slot.Event.Location != nil {
					location = *slot.Event.Location
				}
				brief := model.EventBrief{
					ID:        slot.EventID,
					Order:     order,
					Name:      slot.Event.Name,
					Location:  location,
					Type:      slot.Event.Type,
					StartTime: sectionTimeMap[curr][0],
					EndTime:   sectionTimeMap[end][1],
					Span:      end - curr + 1,
				}
				// Attach the embedded task if any section in this run has one.
				// Content is also nullable and must be non-nil (consistent
				// with SchedulesToRecentCompletedSchedules).
				for i := curr; i <= end; i++ {
					if s, exist := sectionMap[i]; exist && s.EmbeddedTask != nil && s.EmbeddedTask.Content != nil {
						brief.EmbeddedTaskInfo = model.TaskBrief{
							ID:   s.EmbeddedTask.ID,
							Name: *s.EmbeddedTask.Content,
							Type: "task",
						}
						break
					}
				}
				todayDTO.Events = append(todayDTO.Events, brief)
				// Jump past the sections just consumed.
				curr = end + 1
				order++
			} else {
				// === Scenario B: no class here (Type = "empty") ===
				// Merge free slots along the standard double-section grid
				// (1-2, 3-4, ...): an odd section whose successor is also
				// free becomes one combined empty block.
				emptyEnd := curr
				if curr%2 != 0 && curr < 12 {
					if _, nextHasClass := sectionMap[curr+1]; !nextHasClass {
						emptyEnd = curr + 1
					}
				}
				todayDTO.Events = append(todayDTO.Events, model.EventBrief{
					ID:        0, // 0 marks a class-free block
					Order:     order,
					Name:      "无课",
					Type:      "empty",
					StartTime: sectionTimeMap[curr][0],
					EndTime:   sectionTimeMap[emptyEnd][1],
					Location:  "休息时间",
				})
				curr = emptyEnd + 1
				order++
			}
		}
		result = append(result, todayDTO)
	}
	return result
}
// SchedulesToUserWeeklySchedule renders one week's schedule rows into a grid
// DTO: for each day 1..7, consecutive sections of the same event merge into a
// single block, and class-free gaps merge along the standard double-section
// grid (1-2, 3-4, ...).
func SchedulesToUserWeeklySchedule(schedules []model.Schedule) *model.UserWeekSchedule {
	if len(schedules) == 0 {
		return &model.UserWeekSchedule{
			Week:   0,
			Events: []model.WeeklyEventBrief{},
		}
	}
	// 1. Initialize the result (week number taken from the first row).
	weekDTO := &model.UserWeekSchedule{
		Week:   schedules[0].Week,
		Events: []model.WeeklyEventBrief{},
	}
	// 2. Build a fast [day][section] index:
	// indexMap[day][section] -> model.Schedule.
	indexMap := make(map[int]map[int]model.Schedule)
	for d := 1; d <= 7; d++ {
		indexMap[d] = make(map[int]model.Schedule)
	}
	for _, s := range schedules {
		indexMap[s.DayOfWeek][s.Section] = s
	}
	// 3. Linear sweep over days 1..7.
	for day := 1; day <= 7; day++ {
		order := 1 // display order resets at the start of each day
		// 4. Linear sweep over sections 1..12.
		for curr := 1; curr <= 12; {
			// Scenario A: the slot holds a class/task.
			if slot, hasClass := indexMap[day][curr]; hasClass {
				end := curr
				// Probe: merge contiguous sections of the same EventID
				// (span computation).
				for next := curr + 1; next <= 12; next++ {
					if nextSlot, exist := indexMap[day][next]; exist && nextSlot.EventID == slot.EventID {
						end = next
					} else {
						break
					}
				}
				span := end - curr + 1
				// Location is a nullable column; guard the dereference
				// (fixes a panic on events stored without a location).
				location := ""
				if slot.Event.Location != nil {
					location = *slot.Event.Location
				}
				brief := model.WeeklyEventBrief{
					ID:        slot.EventID,
					Order:     order,
					DayOfWeek: day,
					Name:      slot.Event.Name,
					Location:  location,
					Type:      slot.Event.Type,
					StartTime: sectionTimeMap[curr][0],
					EndTime:   sectionTimeMap[end][1],
					Span:      span,
				}
				// Attach embedded-task info if any section in this span has
				// one; Content is nullable and must be non-nil.
				for i := curr; i <= end; i++ {
					if s, exist := indexMap[day][i]; exist && s.EmbeddedTask != nil && s.EmbeddedTask.Content != nil {
						brief.EmbeddedTaskInfo = model.TaskBrief{
							ID:   s.EmbeddedTask.ID,
							Name: *s.EmbeddedTask.Content,
							Type: "task",
						}
						break
					}
				}
				weekDTO.Events = append(weekDTO.Events, brief)
				curr = end + 1 // jump past the consumed block
				order++
			} else {
				// Scenario B: no class (Type="empty"); merge free slots.
				emptyEnd := curr
				// An odd section whose successor is also free merges into
				// one double-section block (1-2, 3-4, ...).
				if curr%2 != 0 && curr < 12 {
					if _, nextHasClass := indexMap[day][curr+1]; !nextHasClass {
						emptyEnd = curr + 1
					}
				}
				weekDTO.Events = append(weekDTO.Events, model.WeeklyEventBrief{
					ID:        0,
					Order:     order,
					DayOfWeek: day,
					Name:      "无课",
					Type:      "empty",
					StartTime: sectionTimeMap[curr][0],
					EndTime:   sectionTimeMap[emptyEnd][1],
					Span:      emptyEnd - curr + 1,
					Location:  "",
				})
				curr = emptyEnd + 1
				order++
			}
		}
	}
	return weekDTO
}
// SchedulesToRecentCompletedSchedules flattens completed schedule rows into a
// de-duplicated list of recently completed events. When a slot carries an
// embedded task, the task's content and type take over the display identity
// of the host event.
func SchedulesToRecentCompletedSchedules(schedules []model.Schedule) *model.UserRecentCompletedScheduleResponse {
	// Always hand back a non-nil Events slice so an empty result encodes as
	// [] rather than null.
	result := &model.UserRecentCompletedScheduleResponse{
		Events: make([]model.RecentCompletedEventBrief, 0),
	}
	// De-duplication map keyed by EventID: each logical event block (course
	// or task run) is emitted at most once.
	seen := make(map[int]bool, len(schedules))
	for _, s := range schedules {
		if seen[s.EventID] {
			continue // this logical block was already emitted
		}
		seen[s.EventID] = true
		// Decide the display name and type: an embedded task overrides the
		// host event even when the carrier is a course.
		name, kind := s.Event.Name, s.Event.Type
		if s.EmbeddedTask != nil && s.EmbeddedTask.Content != nil {
			name = *s.EmbeddedTask.Content
			kind = "embedded_task"
		}
		result.Events = append(result.Events, model.RecentCompletedEventBrief{
			// The EventID keeps IDs unique and lets the frontend trace the
			// logical block.
			ID:   s.EventID,
			Name: name,
			Type: kind,
			// The event's end time doubles as the completion time.
			CompletedTime: s.Event.EndTime.Format("2006-01-02 15:04:05"),
		})
	}
	return result
}
// SchedulesToUserOngoingSchedule exposes the first schedule row's event as
// the currently ongoing item; returns nil when nothing is ongoing.
func SchedulesToUserOngoingSchedule(schedules []model.Schedule) *model.OngoingSchedule {
	if len(schedules) == 0 {
		return nil
	}
	// Take the first row's event as the ongoing one.
	ongoing := schedules[0]
	// Location is a nullable column; guard the dereference so an event
	// stored without a location cannot panic.
	location := ""
	if ongoing.Event.Location != nil {
		location = *ongoing.Event.Location
	}
	return &model.OngoingSchedule{
		ID:        ongoing.EventID,
		Name:      ongoing.Event.Name,
		Type:      ongoing.Event.Type,
		Location:  location,
		StartTime: ongoing.Event.StartTime,
		EndTime:   ongoing.Event.EndTime,
	}
}
// slotInfo is a temporary internal wrapper that lets real schedule rows and
// virtual planner suggestions share one slot grid while being merged.
type slotInfo struct {
	schedule *model.Schedule      // existing schedule row occupying the slot, if any
	plan     *model.TaskClassItem // planner-suggested task occupying the slot, if any
}
// PlanningResultToUserWeekSchedules merges the user's real schedule rows with
// planner-suggested task items into per-week grids. Real schedules and
// suggested plans share one slot grid; buildBrief renders each merged run.
func PlanningResultToUserWeekSchedules(userSchedule []model.Schedule, plans []model.TaskClassItem) []model.UserWeekSchedule {
	// 1. Detect the covered week range and bucket both inputs by week (O(N)).
	minW, maxW := 25, 1
	weekMap := make(map[int][]model.Schedule)
	for _, s := range userSchedule {
		if s.Week < minW {
			minW = s.Week
		}
		if s.Week > maxW {
			maxW = s.Week
		}
		weekMap[s.Week] = append(weekMap[s.Week], s)
	}
	planMap := make(map[int][]model.TaskClassItem)
	for _, p := range plans {
		if p.EmbeddedTime == nil {
			continue
		}
		w := p.EmbeddedTime.Week
		if w < minW {
			minW = w
		}
		if w > maxW {
			maxW = w
		}
		planMap[w] = append(planMap[w], p)
	}
	var results []model.UserWeekSchedule
	for w := minW; w <= maxW; w++ {
		// Build this week's logical [day][section] grid.
		indexMap := make(map[int]map[int]slotInfo)
		for d := 1; d <= 7; d++ {
			indexMap[d] = make(map[int]slotInfo)
		}
		for i := range weekMap[w] {
			// Index into the slice instead of taking &s of the range
			// variable: on Go < 1.22 that would alias every grid cell to a
			// single loop variable, corrupting the merge.
			s := &weekMap[w][i]
			indexMap[s.DayOfWeek][s.Section] = slotInfo{schedule: s}
		}
		for i := range planMap[w] {
			p := &planMap[w][i] // same range-variable aliasing hazard as above
			for sec := p.EmbeddedTime.SectionFrom; sec <= p.EmbeddedTime.SectionTo; sec++ {
				info := indexMap[p.EmbeddedTime.DayOfWeek][sec]
				info.plan = p
				indexMap[p.EmbeddedTime.DayOfWeek][sec] = info
			}
		}
		weekDTO := &model.UserWeekSchedule{Week: w, Events: []model.WeeklyEventBrief{}}
		for day := 1; day <= 7; day++ {
			order := 1
			for curr := 1; curr <= 12; {
				slot := indexMap[day][curr]
				if slot.schedule != nil || slot.plan != nil {
					end := curr
					// Probe the merge boundary precisely.
					for next := curr + 1; next <= 12; next++ {
						nextSlot := indexMap[day][next]
						isSame := false
						if slot.schedule != nil && nextSlot.schedule != nil {
							// Both are classes: merge only the same event.
							isSame = slot.schedule.EventID == nextSlot.schedule.EventID
						} else if slot.schedule == nil && nextSlot.schedule == nil && slot.plan != nil && nextSlot.plan != nil {
							// Both are newly planned tasks: merge only the
							// same TaskClassItem (prevents fusing different
							// tasks into one block).
							isSame = slot.plan.ID == nextSlot.plan.ID
						}
						if isSame {
							end = next
						} else {
							break
						}
					}
					// Compute span once and pass it down by value.
					span := end - curr + 1
					brief := buildBrief(slot, day, curr, end, span, order)
					weekDTO.Events = append(weekDTO.Events, brief)
					curr = end + 1
					order++
				} else {
					// Scenario B: free slot — merge along the
					// double-section grid (1-2, 3-4, ...).
					emptyEnd := curr
					if curr%2 != 0 && curr < 12 {
						if next := indexMap[day][curr+1]; next.schedule == nil && next.plan == nil {
							emptyEnd = curr + 1
						}
					}
					weekDTO.Events = append(weekDTO.Events, model.WeeklyEventBrief{
						Name: "无课", Type: "empty", DayOfWeek: day, Order: order,
						StartTime: sectionTimeMap[curr][0], EndTime: sectionTimeMap[emptyEnd][1],
						Span: emptyEnd - curr + 1,
					})
					curr = emptyEnd + 1
					order++
				}
			}
		}
		results = append(results, *weekDTO)
	}
	return results
}
// buildBrief renders one merged slot run into a WeeklyEventBrief.
// A run backed by a real schedule keeps the event's identity; a run backed
// only by a planner suggestion becomes a "suggested" virtual task block.
func buildBrief(slot slotInfo, day, start, end, span, order int) model.WeeklyEventBrief {
	brief := model.WeeklyEventBrief{
		DayOfWeek: day,
		Order:     order,
		StartTime: sectionTimeMap[start][0],
		EndTime:   sectionTimeMap[end][1],
		Span:      span,
		Status:    "normal", // default status
	}
	// planContent safely unwraps the nullable task content (fixes a panic on
	// plans stored without content).
	planContent := func() string {
		if slot.plan != nil && slot.plan.Content != nil {
			return *slot.plan.Content
		}
		return ""
	}
	if slot.schedule != nil {
		// Scenario A: a real schedule row from the database.
		brief.ID = slot.schedule.EventID
		brief.Name = slot.schedule.Event.Name
		// Location is nullable; guard the dereference.
		if slot.schedule.Event.Location != nil {
			brief.Location = *slot.schedule.Event.Location
		}
		brief.Type = slot.schedule.Event.Type
		// The planner embedded a task into this class block.
		if slot.plan != nil {
			brief.Status = "suggested" // frontend highlights the whole block
			brief.EmbeddedTaskInfo = model.TaskBrief{
				ID:   slot.plan.ID,
				Name: planContent(),
				Type: "task",
			}
		}
	} else if slot.plan != nil {
		// Scenario B: a virtual task block the planner placed in free time.
		brief.Name = planContent()
		brief.Type = "task"
		brief.Status = "suggested"
		brief.ID = slot.plan.ID // reuse the TaskClassItem ID for tracking
	}
	return brief
}

View File

@@ -0,0 +1,218 @@
package conv
import (
"errors"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/LoveLosita/smartflow/backend/shared/respond"
)
// dateLayout is the reference layout for YYYY-MM-DD date strings.
const dateLayout = "2006-01-02"

// parseDatePtr parses an optional YYYY-MM-DD string in the server's local
// time zone. An empty string yields (nil, nil); a malformed string yields the
// parse error.
func parseDatePtr(s string) (*time.Time, error) {
	if s == "" {
		return nil, nil
	}
	parsed, err := time.ParseInLocation(dateLayout, s, time.Local)
	if err != nil {
		return nil, err
	}
	return &parsed, nil
}
// ProcessUserAddTaskClassRequest converts an add-task-class request into a
// TaskClass model plus its TaskClassItem children.
//
// Returns respond.WrongParamType when either date string is malformed.
func ProcessUserAddTaskClassRequest(req *model.UserAddTaskClassRequest, userID int) (*model.TaskClass, []model.TaskClassItem, error) {
	startDate, err := parseDatePtr(req.StartDate)
	if err != nil {
		return nil, nil, respond.WrongParamType
	}
	endDate, err := parseDatePtr(req.EndDate)
	if err != nil {
		return nil, nil, respond.WrongParamType
	}
	// 1. Base columns (request sections 1-2).
	taskClass := model.TaskClass{
		Name:               &req.Name,
		Mode:               &req.Mode,
		StartDate:          startDate,
		EndDate:            endDate,
		SubjectType:        stringPtrOrNil(req.SubjectType),
		DifficultyLevel:    stringPtrOrNil(req.DifficultyLevel),
		CognitiveIntensity: stringPtrOrNil(req.CognitiveIntensity),
		UserID:             &userID,
	}
	// 2. Planner configuration (request section 3).
	taskClass.TotalSlots = &req.Config.TotalSlots
	taskClass.AllowFillerCourse = &req.Config.AllowFillerCourse
	taskClass.Strategy = &req.Config.Strategy
	// ExcludedSlots / ExcludedDaysOfWeek reuse the IntSlice column type
	// directly; the frontend parses it as []int. (The old hand-rolled JSON
	// serialization has been deleted as dead code.)
	taskClass.ExcludedSlots = req.Config.ExcludedSlots
	taskClass.ExcludedDaysOfWeek = req.Config.ExcludedDaysOfWeek
	// 3. Child items. Address fields through the slice index rather than the
	// range variable: &itemReq.Order on the range variable would make every
	// item share one variable's address on Go < 1.22.
	var items []model.TaskClassItem
	for i := range req.Items {
		itemReq := &req.Items[i]
		items = append(items, model.TaskClassItem{
			Order:        &itemReq.Order,
			Content:      &itemReq.Content,
			EmbeddedTime: itemReq.EmbeddedTime,
			Status:       nil, // status is assigned later in the item lifecycle
		})
	}
	return &taskClass, items, nil
}
func timeOrZero(t *time.Time) time.Time {
if t == nil {
return time.Time{}
}
return *t
}
// TaskClassModelToResponse maps TaskClass models onto the list-response DTO.
func TaskClassModelToResponse(taskClasses []model.TaskClass) *model.UserGetTaskClassesResponse {
	var resp model.UserGetTaskClassesResponse
	for _, tc := range taskClasses {
		// Every scalar column on TaskClass is a nullable pointer; use the
		// safe* helpers consistently instead of raw dereferences so a row
		// with a NULL name/mode/slots/strategy cannot panic (the previous
		// code guarded only some of these fields).
		tcResp := model.TaskClassSummary{
			ID:                 tc.ID,
			Name:               safeStr(tc.Name),
			Mode:               safeStr(tc.Mode),
			StartDate:          timeOrZero(tc.StartDate),
			EndDate:            timeOrZero(tc.EndDate),
			TotalSlots:         safeInt(tc.TotalSlots),
			Strategy:           safeStr(tc.Strategy),
			SubjectType:        safeStr(tc.SubjectType),
			DifficultyLevel:    safeStr(tc.DifficultyLevel),
			CognitiveIntensity: safeStr(tc.CognitiveIntensity),
		}
		resp.TaskClasses = append(resp.TaskClasses, tcResp)
	}
	return &resp
}
// ProcessUserGetCompleteTaskClassRequest maps a fully preloaded TaskClass
// model back into the request shape the frontend edits.
func ProcessUserGetCompleteTaskClassRequest(taskClass *model.TaskClass) (*model.UserAddTaskClassRequest, error) {
	if taskClass == nil {
		return nil, errors.New("源数据对象不可为空")
	}
	// 1. Base columns: dereference nullable pointers via the safe* helpers.
	req := &model.UserAddTaskClassRequest{
		Name:               safeStr(taskClass.Name),
		Mode:               safeStr(taskClass.Mode),
		StartDate:          formatTime(taskClass.StartDate),
		EndDate:            formatTime(taskClass.EndDate),
		SubjectType:        safeStr(taskClass.SubjectType),
		DifficultyLevel:    safeStr(taskClass.DifficultyLevel),
		CognitiveIntensity: safeStr(taskClass.CognitiveIntensity),
	}
	// 2. Planner configuration. ExcludedSlots / ExcludedDaysOfWeek reuse the
	// IntSlice column type directly; the frontend parses it as []int.
	req.Config = model.UserAddTaskClassConfig{
		TotalSlots:        safeInt(taskClass.TotalSlots),
		AllowFillerCourse: safeBool(taskClass.AllowFillerCourse),
		Strategy:          safeStr(taskClass.Strategy),
	}
	req.Config.ExcludedSlots = taskClass.ExcludedSlots
	req.Config.ExcludedDaysOfWeek = taskClass.ExcludedDaysOfWeek
	// 3. Child items — already present on taskClass.Items via Preload.
	req.Items = make([]model.UserAddTaskClassItemRequest, 0, len(taskClass.Items))
	for _, item := range taskClass.Items {
		req.Items = append(req.Items, model.UserAddTaskClassItemRequest{
			// Database primary key: frontend drag-and-drop relies on it.
			ID:           item.ID,
			Order:        safeInt(item.Order),
			Content:      safeStr(item.Content),
			EmbeddedTime: item.EmbeddedTime, // struct pointer reused as-is
		})
	}
	return req, nil
}
// UserInsertTaskItemRequestToModel converts an insert-task-into-free-slot
// request into the per-section Schedule rows plus the backing ScheduleEvent.
func UserInsertTaskItemRequestToModel(req *model.UserInsertTaskClassItemToScheduleRequest, item *model.TaskClassItem, taskID *int, userID, startSection, endSection int) ([]model.Schedule, *model.ScheduleEvent, error) {
	var schedules []model.Schedule
	// One Schedule row per occupied section in [startSection, endSection].
	for section := startSection; section <= endSection; section++ {
		req1 := &model.Schedule{
			UserID:         userID,
			EmbeddedTaskID: taskID,
			Week:           req.Week,
			DayOfWeek:      req.DayOfWeek,
			Section:        section,
			Status:         "normal",
		}
		schedules = append(schedules, *req1)
	}
	// Resolve absolute wall-clock start/end from the relative coordinates.
	startTime, endTime, err := RelativeTimeToRealTime(req.Week, req.DayOfWeek, startSection, endSection)
	if err != nil {
		return nil, nil, err
	}
	req2 := &model.ScheduleEvent{
		UserID:        userID,                // filled from the caller's identity
		Name:          safeStr(item.Content), // task content doubles as the event name
		Type:          "task",
		RelID:         &item.ID, // link back to the originating TaskClassItem
		CanBeEmbedded: false,    // a task-backed event does not accept further embedded tasks
		StartTime:     startTime,
		EndTime:       endTime,
	}
	return schedules, req2, nil
}
// --- Helper utilities: keep conversions terse and panic-free ---

// safeStr dereferences a nullable string, mapping nil to "".
func safeStr(s *string) string {
	if s != nil {
		return *s
	}
	return ""
}
// safeInt dereferences a nullable int, mapping nil to 0.
func safeInt(i *int) int {
	if i != nil {
		return *i
	}
	return 0
}
// stringPtrOrNil maps "" to nil and any other string to a pointer to it,
// matching the nullable-column convention used by the models.
func stringPtrOrNil(value string) *string {
	if value != "" {
		return &value
	}
	return nil
}
// safeBool dereferences a nullable bool; nil defaults to true (the historical
// default for allow-style flags in this package).
func safeBool(b *bool) bool {
	if b != nil {
		return *b
	}
	return true
}
func formatTime(t *time.Time) string {
if t == nil {
return ""
}
// 务必使用 2006-01-02 格式以匹配前端校验
return t.Format("2006-01-02")
}

View File

@@ -0,0 +1,94 @@
package conv
import (
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
)
// UserAddTaskRequestToModel maps an add-task request onto a Task model owned
// by the given user.
func UserAddTaskRequestToModel(request *model.UserAddTaskRequest, userID int) *model.Task {
	task := &model.Task{
		Title:              request.Title,
		Priority:           request.PriorityGroup,
		DeadlineAt:         request.DeadlineAt,
		UrgencyThresholdAt: request.UrgencyThresholdAt,
		UserID:             userID,
	}
	// Clamp/normalize the requested section estimate before persisting.
	task.EstimatedSections = model.NormalizeEstimatedSections(&request.EstimatedSections)
	return task
}
// ModelToUserAddTaskResponse builds the add-task response DTO from a stored
// Task. CreatedAt is the current server time, preserving the endpoint's
// original response semantics.
func ModelToUserAddTaskResponse(task *model.Task) *model.UserAddTaskResponse {
	var status string
	if task.IsCompleted {
		status = "completed"
	} else {
		status = "incomplete"
	}
	return &model.UserAddTaskResponse{
		ID:                task.ID,
		Title:             task.Title,
		PriorityGroup:     task.Priority,
		EstimatedSections: model.NormalizeEstimatedSections(&task.EstimatedSections),
		DeadlineAt:        task.DeadlineAt,
		Status:            status,
		CreatedAt:         time.Now(),
	}
}
// ModelToGetUserTasksResp converts a slice of Task models into response DTOs.
//
// Delegates per-element conversion to ModelToGetUserTaskResp so the two
// converters cannot drift apart (their bodies were previously duplicated
// line-for-line).
func ModelToGetUserTasksResp(tasks []model.Task) []model.GetUserTaskResp {
	var resp []model.GetUserTaskResp
	for i := range tasks {
		resp = append(resp, ModelToGetUserTaskResp(&tasks[i]))
	}
	return resp
}
// ModelToGetUserTaskResp converts a single Task model into its response DTO.
// Nullable timestamps render as "" when unset, otherwise in
// "2006-01-02 15:04:05" form.
func ModelToGetUserTaskResp(task *model.Task) model.GetUserTaskResp {
	const layout = "2006-01-02 15:04:05"
	// formatOpt renders an optional timestamp, mapping nil to "".
	formatOpt := func(ts *time.Time) string {
		if ts == nil {
			return ""
		}
		return ts.Format(layout)
	}
	status := "incomplete"
	if task.IsCompleted {
		status = "completed"
	}
	return model.GetUserTaskResp{
		ID:                 task.ID,
		UserID:             task.UserID,
		Title:              task.Title,
		PriorityGroup:      task.Priority,
		EstimatedSections:  model.NormalizeEstimatedSections(&task.EstimatedSections),
		Status:             status,
		Deadline:           formatOpt(task.DeadlineAt),
		IsCompleted:        task.IsCompleted,
		UrgencyThresholdAt: formatOpt(task.UrgencyThresholdAt),
	}
}

View File

@@ -0,0 +1,149 @@
package conv
import (
"errors"
"fmt"
"time"
"github.com/LoveLosita/smartflow/backend/shared/respond"
"github.com/spf13/viper"
)
// DateFormat is the single reference layout used across this package for
// parsing and formatting calendar dates.
const DateFormat = "2006-01-02"

// RealDateToRelativeDate converts an absolute YYYY-MM-DD date into the
// semester-relative (week, dayOfWeek) pair, where the configured semester
// start date is week 1, day 1.
func RealDateToRelativeDate(realDate string) (int, int, error) {
	target, err := time.Parse(DateFormat, realDate)
	if err != nil {
		return 0, 0, err
	}
	// Semester boundaries come from configuration.
	start, err := time.Parse(DateFormat, viper.GetString("time.semesterStartDate"))
	if err != nil {
		return 0, 0, err
	}
	end, err := time.Parse(DateFormat, viper.GetString("time.semesterEndDate"))
	if err != nil {
		return 0, 0, err
	}
	// Boundary check: the date must fall inside the semester.
	if target.Before(start) || target.After(end) {
		return 0, 0, errors.New("日期超出学期范围")
	}
	// Whole days elapsed since the semester started (24h per day).
	days := int(target.Sub(start).Hours() / 24)
	// Week and day-of-week, with the semester start being week 1, day 1.
	return days/7 + 1, days%7 + 1, nil
}
// RelativeDateToRealDate converts a relative (week, dayOfWeek) pair back to an
// absolute YYYY-MM-DD date, based on the configured semester start date.
// Returns respond.TimeOutOfRangeOfThisSemester when the result lands after
// the configured semester end date.
func RelativeDateToRealDate(week, dayOfWeek int) (string, error) {
	semesterStart := viper.GetString("time.semesterStartDate")
	semesterEnd := viper.GetString("time.semesterEndDate")
	start, err := time.Parse(DateFormat, semesterStart)
	if err != nil {
		// Previously this error was silently discarded, anchoring dates at
		// the zero time whenever the config was malformed.
		return "", err
	}
	// Core conversion: (week-1)*7 + (day-1) days after the semester start.
	offsetDays := (week-1)*7 + (dayOfWeek - 1)
	targetDate := start.AddDate(0, 0, offsetDays)
	// Validate against the semester end date.
	end, err := time.Parse(DateFormat, semesterEnd)
	if err != nil {
		return "", err
	}
	if targetDate.After(end) {
		return "", respond.TimeOutOfRangeOfThisSemester
	}
	return targetDate.Format(DateFormat), nil
}
// SectionTime holds the wall-clock start and end of one class section.
type SectionTime struct {
	Start string // section start time, "HH:MM"
	End   string // section end time, "HH:MM"
}

// SectionTimeMap2 maps section numbers 1..12 to their wall-clock times.
var SectionTimeMap2 = map[int]SectionTime{
	1:  {Start: "08:00", End: "08:45"},
	2:  {Start: "08:55", End: "09:40"},
	3:  {Start: "10:15", End: "11:00"},
	4:  {Start: "11:10", End: "11:55"},
	5:  {Start: "14:00", End: "14:45"},
	6:  {Start: "14:55", End: "15:40"},
	7:  {Start: "16:15", End: "17:00"},
	8:  {Start: "17:10", End: "17:55"},
	9:  {Start: "19:00", End: "19:45"},
	10: {Start: "19:55", End: "20:40"},
	11: {Start: "20:50", End: "21:35"},
	12: {Start: "21:45", End: "22:30"},
}
// RelativeTimeToRealTime converts a semester-relative (week, dayOfWeek,
// startSection..endSection) span into absolute start/end timestamps in the
// configured time zone.
func RelativeTimeToRealTime(week, dayOfWeek, startSection, endSection int) (time.Time, time.Time, error) {
	// 1. Validate the section span and calendar coordinates.
	if startSection > endSection {
		return time.Time{}, time.Time{}, respond.InvalidSectionRange
	}
	startTimeInfo, okStart := SectionTimeMap2[startSection]
	endTimeInfo, okEnd := SectionTimeMap2[endSection]
	if !okStart || !okEnd {
		return time.Time{}, time.Time{}, respond.InvalidSectionNumber
	}
	if week < 1 || dayOfWeek < 1 || dayOfWeek > 7 {
		return time.Time{}, time.Time{}, respond.InvalidWeekOrDayOfWeek
	}
	// 2. Resolve the target calendar date:
	// offset days = (week-1)*7 + (dayOfWeek-1) past the semester start.
	daysOffset := (week-1)*7 + (dayOfWeek - 1)
	baseDate, err := time.Parse("2006-01-02", viper.GetString("time.semesterStartDate"))
	if err != nil {
		// Previously this error was silently discarded, anchoring dates at
		// the zero time whenever the config was malformed.
		return time.Time{}, time.Time{}, err
	}
	dateStr := baseDate.AddDate(0, 0, daysOffset).Format("2006-01-02")
	// 3. Resolve the configured time zone (e.g. Asia/Shanghai). A bad zone
	// name previously yielded a nil location silently.
	loc, err := time.LoadLocation(viper.GetString("time.zone"))
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	// Combine the date with the start section's begin time and the end
	// section's finish time.
	startTime, err := time.ParseInLocation("2006-01-02 15:04", fmt.Sprintf("%s %s", dateStr, startTimeInfo.Start), loc)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	endTime, err := time.ParseInLocation("2006-01-02 15:04", fmt.Sprintf("%s %s", dateStr, endTimeInfo.End), loc)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	return startTime, endTime, nil
}
func CalculateFirstDayOfWeek(date time.Time) time.Time {
// 计算当前日期是周几0-60表示周日
weekday := int(date.Weekday())
if weekday == 0 {
weekday = 7 // 将周日调整为7方便计算
}
// 计算距离周一的天数偏移
offset := weekday - 1
// 计算本周一的日期
firstDayOfWeek := date.AddDate(0, 0, -offset)
return firstDayOfWeek
}
func CalculateLastDayOfWeek(date time.Time) time.Time {
// 计算当前日期是周几0-60表示周日
weekday := int(date.Weekday())
if weekday == 0 {
weekday = 7 // 将周日调整为7方便计算
}
// 计算距离周日的天数偏移
offset := 7 - weekday
// 计算本周日的日期
lastDayOfWeek := date.AddDate(0, 0, offset)
return lastDayOfWeek
}

View File

@@ -0,0 +1,310 @@
package dao
import (
"context"
"errors"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// ActiveScheduleDAO manages the stage-1 tables owned by active scheduling.
//
// Responsibility boundary:
//  1. Only basic reads/writes of active_schedule_jobs / triggers / previews;
//  2. Never builds candidates, calls the LLM, dispatches to providers, or
//     writes the official schedule;
//  3. Idempotency lookups only read facts by persisted keys — whether a
//     result is reused is decided by the upstream state machine.
type ActiveScheduleDAO struct {
	db *gorm.DB
}
// NewActiveScheduleDAO wires a DAO around the given gorm handle.
func NewActiveScheduleDAO(db *gorm.DB) *ActiveScheduleDAO {
	return &ActiveScheduleDAO{db: db}
}
// WithTx returns a copy of the DAO bound to the given transaction handle.
func (d *ActiveScheduleDAO) WithTx(tx *gorm.DB) *ActiveScheduleDAO {
	return &ActiveScheduleDAO{db: tx}
}
// ensureDB guards every method against a nil receiver or an unwired DB handle.
func (d *ActiveScheduleDAO) ensureDB() error {
	if d == nil || d.db == nil {
		return errors.New("active schedule dao 未初始化")
	}
	return nil
}
// CreateOrUpdateJob idempotently creates or overwrites an active-schedule job
// keyed by job.id.
//
// Responsibility boundary:
//  1. Only upserts the passed job snapshot by primary key;
//  2. Does not check whether the task still qualifies for active scheduling —
//     the job scanner decides that from the task's source of truth;
//  3. Callers must supply a stable ID, e.g. the task's current effective job
//     ID or a generated asj_* identifier.
func (d *ActiveScheduleDAO) CreateOrUpdateJob(ctx context.Context, job *model.ActiveScheduleJob) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if job == nil || job.ID == "" {
		return errors.New("active schedule job 不能为空且必须包含 id")
	}
	// Upsert: on id conflict, overwrite every column with the new snapshot.
	return d.db.WithContext(ctx).
		Clauses(clause.OnConflict{
			Columns:   []clause.Column{{Name: "id"}},
			UpdateAll: true,
		}).
		Create(job).Error
}
// UpdateJobFields updates the given columns of a job by job_id.
//
// Responsibility boundary:
//  1. Performs only a partial field update — no implicit state changes;
//  2. Returns nil immediately when updates is empty, so callers can assemble
//     the update set conditionally;
//  3. Does not validate state-machine transitions — that logic lives in
//     active_scheduler/job.
func (d *ActiveScheduleDAO) UpdateJobFields(ctx context.Context, jobID string, updates map[string]any) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if jobID == "" {
		return errors.New("active schedule job id 不能为空")
	}
	if len(updates) == 0 {
		return nil
	}
	return d.db.WithContext(ctx).
		Model(&model.ActiveScheduleJob{}).
		Where("id = ?", jobID).
		Updates(updates).Error
}
// GetJobByID loads one job by primary key. An empty ID maps to
// gorm.ErrRecordNotFound without touching the database.
func (d *ActiveScheduleDAO) GetJobByID(ctx context.Context, jobID string) (*model.ActiveScheduleJob, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if jobID == "" {
		return nil, gorm.ErrRecordNotFound
	}
	var job model.ActiveScheduleJob
	err := d.db.WithContext(ctx).Where("id = ?", jobID).First(&job).Error
	if err != nil {
		return nil, err
	}
	return &job, nil
}
// FindPendingJobByTask returns the task's currently pending job.
//
// Notes:
//  1. Used on task create/update to decide between reusing and overwriting
//     the current effective job;
//  2. Only pending jobs qualify — triggered/canceled/skipped history is kept
//     for audit and is never overwritten.
func (d *ActiveScheduleDAO) FindPendingJobByTask(ctx context.Context, userID int, taskID int) (*model.ActiveScheduleJob, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	// Non-positive IDs cannot match a row; short-circuit as not-found.
	if userID <= 0 || taskID <= 0 {
		return nil, gorm.ErrRecordNotFound
	}
	var job model.ActiveScheduleJob
	err := d.db.WithContext(ctx).
		Where("user_id = ? AND task_id = ? AND status = ?", userID, taskID, model.ActiveScheduleJobStatusPending).
		Order("trigger_at ASC, created_at ASC").
		First(&job).Error
	if err != nil {
		return nil, err
	}
	return &job, nil
}
// ListDueJobs reads jobs that are due (trigger_at <= now) and still pending.
//
// Failure handling:
//  1. Invalid arguments yield an empty list, so a config hiccup cannot make
//     the worker accidentally scan the whole table;
//  2. Database errors are returned as-is for the scanner to log and retry.
func (d *ActiveScheduleDAO) ListDueJobs(ctx context.Context, now time.Time, limit int) ([]model.ActiveScheduleJob, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if limit <= 0 || now.IsZero() {
		return []model.ActiveScheduleJob{}, nil
	}
	var jobs []model.ActiveScheduleJob
	err := d.db.WithContext(ctx).
		Where("status = ? AND trigger_at <= ?", model.ActiveScheduleJobStatusPending, now).
		Order("trigger_at ASC, id ASC").
		Limit(limit).
		Find(&jobs).Error
	if err != nil {
		return nil, err
	}
	return jobs, nil
}
// CreateTrigger inserts one trigger row; it must carry a non-empty ID.
func (d *ActiveScheduleDAO) CreateTrigger(ctx context.Context, trigger *model.ActiveScheduleTrigger) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if trigger == nil || trigger.ID == "" {
		return errors.New("active schedule trigger 不能为空且必须包含 id")
	}
	return d.db.WithContext(ctx).Create(trigger).Error
}
// UpdateTriggerFields partially updates a trigger's state by trigger_id.
//
// Responsibility boundary:
//  1. Only provides field updates — it does not validate whether a
//     pending -> processing -> preview_generated transition is legal;
//  2. Callers needing CAS-style transitions should add their own WHERE
//     conditions or extend this with a dedicated method later;
//  3. Returns nil immediately when updates is empty.
func (d *ActiveScheduleDAO) UpdateTriggerFields(ctx context.Context, triggerID string, updates map[string]any) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if triggerID == "" {
		return errors.New("active schedule trigger id 不能为空")
	}
	if len(updates) == 0 {
		return nil
	}
	return d.db.WithContext(ctx).
		Model(&model.ActiveScheduleTrigger{}).
		Where("id = ?", triggerID).
		Updates(updates).Error
}
// GetTriggerByID loads one trigger by primary key. An empty ID maps to
// gorm.ErrRecordNotFound without touching the database.
func (d *ActiveScheduleDAO) GetTriggerByID(ctx context.Context, triggerID string) (*model.ActiveScheduleTrigger, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if triggerID == "" {
		return nil, gorm.ErrRecordNotFound
	}
	var trigger model.ActiveScheduleTrigger
	err := d.db.WithContext(ctx).Where("id = ?", triggerID).First(&trigger).Error
	if err != nil {
		return nil, err
	}
	return &trigger, nil
}
// FindTriggerByDedupeKey returns the most recent trigger for a dedupe key.
//
// Notes:
//  1. important_urgent_task builds dedupe_key from user_id + trigger_type +
//     target + a 30-minute window;
//  2. unfinished_feedback may store its feedback idempotency key here;
//  3. An empty statuses slice reads all statuses, letting each call site
//     decide whether failed records are reusable.
func (d *ActiveScheduleDAO) FindTriggerByDedupeKey(ctx context.Context, dedupeKey string, statuses []string) (*model.ActiveScheduleTrigger, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if dedupeKey == "" {
		return nil, gorm.ErrRecordNotFound
	}
	query := d.db.WithContext(ctx).
		Where("dedupe_key = ?", dedupeKey)
	if len(statuses) > 0 {
		query = query.Where("status IN ?", statuses)
	}
	var trigger model.ActiveScheduleTrigger
	// Most recent first: pick the latest matching record.
	err := query.Order("created_at DESC, id DESC").First(&trigger).Error
	if err != nil {
		return nil, err
	}
	return &trigger, nil
}
// FindTriggerByIdempotencyKey looks up the most recent trigger recorded for
// an API / user-feedback idempotency key. Invalid arguments short-circuit to
// gorm.ErrRecordNotFound without touching the database.
func (d *ActiveScheduleDAO) FindTriggerByIdempotencyKey(ctx context.Context, userID int, triggerType string, idempotencyKey string) (*model.ActiveScheduleTrigger, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if userID <= 0 || triggerType == "" || idempotencyKey == "" {
		return nil, gorm.ErrRecordNotFound
	}
	var trigger model.ActiveScheduleTrigger
	err := d.db.WithContext(ctx).
		Where("user_id = ? AND trigger_type = ? AND idempotency_key = ?", userID, triggerType, idempotencyKey).
		Order("created_at DESC, id DESC").
		First(&trigger).Error
	if err != nil {
		return nil, err
	}
	return &trigger, nil
}
// CreatePreview inserts one preview row; the caller must supply a preview_id.
func (d *ActiveScheduleDAO) CreatePreview(ctx context.Context, preview *model.ActiveSchedulePreview) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	valid := preview != nil && preview.ID != ""
	if !valid {
		return errors.New("active schedule preview 不能为空且必须包含 preview_id")
	}
	return d.db.WithContext(ctx).Create(preview).Error
}
// UpdatePreviewFields applies a partial column update to one preview row.
// An empty updates map is a no-op.
func (d *ActiveScheduleDAO) UpdatePreviewFields(ctx context.Context, previewID string, updates map[string]any) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if previewID == "" {
		return errors.New("active schedule preview id 不能为空")
	}
	if len(updates) == 0 {
		return nil
	}
	tx := d.db.WithContext(ctx).Model(&model.ActiveSchedulePreview{})
	return tx.Where("preview_id = ?", previewID).Updates(updates).Error
}
// GetPreviewByID loads one preview row by preview_id.
// A blank id short-circuits to gorm.ErrRecordNotFound.
func (d *ActiveScheduleDAO) GetPreviewByID(ctx context.Context, previewID string) (*model.ActiveSchedulePreview, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if previewID == "" {
		return nil, gorm.ErrRecordNotFound
	}
	preview := new(model.ActiveSchedulePreview)
	if err := d.db.WithContext(ctx).Where("preview_id = ?", previewID).First(preview).Error; err != nil {
		return nil, err
	}
	return preview, nil
}
// GetPreviewByTriggerID returns the newest preview produced for a trigger.
func (d *ActiveScheduleDAO) GetPreviewByTriggerID(ctx context.Context, triggerID string) (*model.ActiveSchedulePreview, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if triggerID == "" {
		return nil, gorm.ErrRecordNotFound
	}
	preview := new(model.ActiveSchedulePreview)
	err := d.db.WithContext(ctx).
		Where("trigger_id = ?", triggerID).
		Order("created_at DESC").
		First(preview).Error
	if err != nil {
		return nil, err
	}
	return preview, nil
}
// FindPreviewByApplyIdempotencyKey looks up a preview's apply state when a confirm
// request is retried with the same idempotency key.
func (d *ActiveScheduleDAO) FindPreviewByApplyIdempotencyKey(ctx context.Context, previewID string, idempotencyKey string) (*model.ActiveSchedulePreview, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	// Blank identifiers can never match a stored row; treat them as not found.
	if previewID == "" || idempotencyKey == "" {
		return nil, gorm.ErrRecordNotFound
	}
	var preview model.ActiveSchedulePreview
	err := d.db.WithContext(ctx).
		Where("preview_id = ? AND apply_idempotency_key = ?", previewID, idempotencyKey).
		First(&preview).Error
	if err != nil {
		return nil, err
	}
	return &preview, nil
}

View File

@@ -0,0 +1,438 @@
package dao
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// activeScheduleSessionLiveStatuses lists the statuses in which a session is still
// "live" (i.e. may still intercept the normal chat flow).
var activeScheduleSessionLiveStatuses = []string{
	model.ActiveScheduleSessionStatusWaitingUserReply,
	model.ActiveScheduleSessionStatusRerunning,
}

// ActiveScheduleSessionDAO owns database reads/writes for active-schedule sessions.
//
// Responsibility boundary:
//  1. It manages only the session table itself, not the chat-entry interception policy;
//  2. It only exposes reads/writes keyed by session_id / conversation_id and does not orchestrate the graph;
//  3. Cache-hit policy is decided by callers; MySQL is always treated as the source of truth here.
type ActiveScheduleSessionDAO struct {
	db *gorm.DB
}

// NewActiveScheduleSessionDAO creates the active-schedule session DAO.
func NewActiveScheduleSessionDAO(db *gorm.DB) *ActiveScheduleSessionDAO {
	return &ActiveScheduleSessionDAO{db: db}
}

// WithTx builds a same-transaction DAO from the caller's transaction handle.
func (d *ActiveScheduleSessionDAO) WithTx(tx *gorm.DB) *ActiveScheduleSessionDAO {
	return &ActiveScheduleSessionDAO{db: tx}
}

// ensureDB guards against use of a nil or uninitialized DAO.
func (d *ActiveScheduleSessionDAO) ensureDB() error {
	if d == nil || d.db == nil {
		return errors.New("active schedule session dao 未初始化")
	}
	return nil
}
// UpsertActiveScheduleSession idempotently inserts or overwrites a session by session_id.
//
// Step by step:
//  1. Validate the primary key, owning user and status first, so dirty sessions never reach the table;
//  2. Serialize the lightweight state into state_json so the database-side format stays stable;
//  3. Upsert via OnConflict: created_at is preserved, only business columns and updated_at are refreshed.
func (d *ActiveScheduleSessionDAO) UpsertActiveScheduleSession(ctx context.Context, snapshot *model.ActiveScheduleSessionSnapshot) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	normalized, err := normalizeActiveScheduleSessionSnapshot(snapshot)
	if err != nil {
		return err
	}
	stateJSON, err := marshalActiveScheduleSessionState(normalized.State)
	if err != nil {
		return fmt.Errorf("marshal active schedule session state failed: %w", err)
	}
	now := time.Now()
	row := model.ActiveScheduleSession{
		SessionID:        normalized.SessionID,
		UserID:           normalized.UserID,
		ConversationID:   nullableStringPtr(normalized.ConversationID),
		TriggerID:        normalized.TriggerID,
		CurrentPreviewID: nullableStringPtr(normalized.CurrentPreviewID),
		Status:           normalized.Status,
		StateJSON:        stateJSON,
		CreatedAt:        normalized.CreatedAt,
		UpdatedAt:        now,
	}
	// A zero CreatedAt means a brand-new session; stamp it with the current time.
	if row.CreatedAt.IsZero() {
		row.CreatedAt = now
	}
	return d.db.WithContext(ctx).Clauses(clause.OnConflict{
		Columns: []clause.Column{
			{Name: "session_id"},
		},
		DoUpdates: clause.Assignments(map[string]any{
			"user_id":            row.UserID,
			"conversation_id":    row.ConversationID,
			"trigger_id":         row.TriggerID,
			"current_preview_id": row.CurrentPreviewID,
			"status":             row.Status,
			"state_json":         row.StateJSON,
			"updated_at":         row.UpdatedAt,
		}),
	}).Create(&row).Error
}
// GetActiveScheduleSessionBySessionID reads a session record of any status by session_id.
//
// Return semantics:
//  1. Hit: snapshot, nil;
//  2. Miss: nil, nil — the caller decides whether to fall back to the source or create a new session;
//  3. Corrupted data: an error, so broken state never flows into the interception logic.
func (d *ActiveScheduleSessionDAO) GetActiveScheduleSessionBySessionID(ctx context.Context, sessionID string) (*model.ActiveScheduleSessionSnapshot, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	normalizedSessionID := strings.TrimSpace(sessionID)
	if normalizedSessionID == "" {
		return nil, errors.New("session_id is empty")
	}
	var row model.ActiveScheduleSession
	err := d.db.WithContext(ctx).
		Where("session_id = ?", normalizedSessionID).
		First(&row).Error
	if err != nil {
		// Not-found is a plain miss, not an error, per the contract above.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return activeScheduleSessionSnapshotFromRow(&row)
}
// GetActiveScheduleSessionByConversationID reads the newest session record for
// user_id + conversation_id.
//
// Responsibility boundary:
//  1. Always returns the latest record of a conversation so callers can inspect the current status directly;
//  2. Never makes the "should we intercept" decision inside the DAO, keeping routing rules out of the storage layer;
//  3. If duplicate rows were ever written for one conversation, the most recently updated one wins.
func (d *ActiveScheduleSessionDAO) GetActiveScheduleSessionByConversationID(ctx context.Context, userID int, conversationID string) (*model.ActiveScheduleSessionSnapshot, error) {
	if err := d.ensureDB(); err != nil {
		return nil, err
	}
	if userID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return nil, errors.New("conversation_id is empty")
	}
	var row model.ActiveScheduleSession
	err := d.db.WithContext(ctx).
		Where("user_id = ? AND conversation_id = ?", userID, normalizedConversationID).
		Order("updated_at DESC, created_at DESC, session_id DESC").
		First(&row).Error
	if err != nil {
		// A conversation without any session is a plain miss, not an error.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, err
	}
	return activeScheduleSessionSnapshotFromRow(&row)
}
// UpdateActiveScheduleSessionFieldsBySessionID updates selected columns by session_id.
//
// Notes:
//  1. state_json serialization is not handled here; the caller supplies final column values;
//  2. An empty updates map returns nil immediately, avoiding a useless write;
//  3. updated_at is refreshed automatically (unless the caller set it) so the timeline stays traceable.
func (d *ActiveScheduleSessionDAO) UpdateActiveScheduleSessionFieldsBySessionID(ctx context.Context, sessionID string, updates map[string]any) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	normalizedSessionID := strings.TrimSpace(sessionID)
	if normalizedSessionID == "" {
		return errors.New("session_id is empty")
	}
	if len(updates) == 0 {
		return nil
	}
	// Clone before mutating so the caller's map is never written to.
	normalizedUpdates := cloneUpdateMap(updates)
	if _, ok := normalizedUpdates["updated_at"]; !ok {
		normalizedUpdates["updated_at"] = time.Now()
	}
	return d.db.WithContext(ctx).
		Model(&model.ActiveScheduleSession{}).
		Where("session_id = ?", normalizedSessionID).
		Updates(normalizedUpdates).Error
}
// TryTransitionActiveScheduleSessionStatusBySessionID atomically flips a session's
// status by session_id.
//
// Responsibility boundary:
//  1. It only performs the lightweight CAS "switch to toStatus while status is still
//     fromStatus"; it never writes state_json or preview_id;
//  2. true means this call won the transition and may proceed with the follow-up rerun;
//  3. false means another request advanced the status first; the caller should fall
//     back to a takeover hint instead of generating a duplicate preview.
func (d *ActiveScheduleSessionDAO) TryTransitionActiveScheduleSessionStatusBySessionID(ctx context.Context, sessionID string, fromStatus string, toStatus string) (bool, error) {
	if err := d.ensureDB(); err != nil {
		return false, err
	}
	normalizedSessionID := strings.TrimSpace(sessionID)
	if normalizedSessionID == "" {
		return false, errors.New("session_id is empty")
	}
	normalizedFrom, err := normalizeActiveScheduleSessionStatus(fromStatus)
	if err != nil {
		return false, fmt.Errorf("invalid active schedule session from status: %w", err)
	}
	normalizedTo, err := normalizeActiveScheduleSessionStatus(toStatus)
	if err != nil {
		return false, fmt.Errorf("invalid active schedule session to status: %w", err)
	}
	// The WHERE clause carries the expected current status, making the update a CAS.
	result := d.db.WithContext(ctx).
		Model(&model.ActiveScheduleSession{}).
		Where("session_id = ? AND status = ?", normalizedSessionID, normalizedFrom).
		Updates(map[string]any{
			"status":     normalizedTo,
			"updated_at": time.Now(),
		})
	if result.Error != nil {
		return false, result.Error
	}
	// RowsAffected > 0 <=> we won the race.
	return result.RowsAffected > 0, nil
}
// UpdateActiveScheduleSessionFieldsByConversationID updates selected columns of the
// newest session for user_id + conversation_id.
//
// Step by step:
//  1. Locate the conversation's newest session first, then write back by session_id,
//     so one update never overwrites multiple historical rows;
//  2. Write the partial columns plus updated_at so status changes stay traceable per session;
//  3. Return gorm.ErrRecordNotFound when no session exists — the caller decides
//     whether to create one or release the normal chat flow.
func (d *ActiveScheduleSessionDAO) UpdateActiveScheduleSessionFieldsByConversationID(ctx context.Context, userID int, conversationID string, updates map[string]any) error {
	if err := d.ensureDB(); err != nil {
		return err
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	if len(updates) == 0 {
		return nil
	}
	row, err := d.GetActiveScheduleSessionByConversationID(ctx, userID, normalizedConversationID)
	if err != nil {
		return err
	}
	if row == nil {
		return gorm.ErrRecordNotFound
	}
	// Clone before mutating so the caller's map is never written to.
	normalizedUpdates := cloneUpdateMap(updates)
	if _, ok := normalizedUpdates["updated_at"]; !ok {
		normalizedUpdates["updated_at"] = time.Now()
	}
	return d.db.WithContext(ctx).
		Model(&model.ActiveScheduleSession{}).
		Where("session_id = ?", row.SessionID).
		Updates(normalizedUpdates).Error
}
// normalizeActiveScheduleSessionSnapshot validates a snapshot and returns a cleaned copy.
// Required: non-blank session_id and trigger_id, positive user_id, known status.
func normalizeActiveScheduleSessionSnapshot(snapshot *model.ActiveScheduleSessionSnapshot) (*model.ActiveScheduleSessionSnapshot, error) {
	if snapshot == nil {
		return nil, errors.New("active schedule session snapshot is nil")
	}
	sessionID := strings.TrimSpace(snapshot.SessionID)
	if sessionID == "" {
		return nil, errors.New("session_id is empty")
	}
	if snapshot.UserID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", snapshot.UserID)
	}
	status, err := normalizeActiveScheduleSessionStatus(snapshot.Status)
	if err != nil {
		return nil, err
	}
	triggerID := strings.TrimSpace(snapshot.TriggerID)
	if triggerID == "" {
		return nil, errors.New("trigger_id is empty")
	}
	// Copy first, then overwrite the cleaned fields; UserID is carried over by the copy.
	cleaned := *snapshot
	cleaned.SessionID = sessionID
	cleaned.ConversationID = strings.TrimSpace(snapshot.ConversationID)
	cleaned.TriggerID = triggerID
	cleaned.CurrentPreviewID = strings.TrimSpace(snapshot.CurrentPreviewID)
	cleaned.Status = status
	cleaned.State = normalizeActiveScheduleSessionState(snapshot.State)
	return &cleaned, nil
}
// normalizeActiveScheduleSessionStatus lower-cases and trims a raw status and
// verifies it against the known status constants, returning the canonical value.
func normalizeActiveScheduleSessionStatus(raw string) (string, error) {
	candidate := strings.ToLower(strings.TrimSpace(raw))
	// The constants are the canonical (lowercase) status strings; the original
	// switch compared the normalized input against them directly.
	known := []string{
		model.ActiveScheduleSessionStatusWaitingUserReply,
		model.ActiveScheduleSessionStatusRerunning,
		model.ActiveScheduleSessionStatusReadyPreview,
		model.ActiveScheduleSessionStatusApplied,
		model.ActiveScheduleSessionStatusIgnored,
		model.ActiveScheduleSessionStatusExpired,
		model.ActiveScheduleSessionStatusFailed,
	}
	for _, status := range known {
		if candidate == status {
			return status, nil
		}
	}
	return "", fmt.Errorf("invalid active schedule session status: %s", raw)
}
// normalizeActiveScheduleSessionState trims all string fields, normalizes a zero
// ExpiresAt to nil, and dedupes/trims the MissingInfo list.
func normalizeActiveScheduleSessionState(state model.ActiveScheduleSessionState) model.ActiveScheduleSessionState {
	state.PendingQuestion = strings.TrimSpace(state.PendingQuestion)
	state.LastCandidateID = strings.TrimSpace(state.LastCandidateID)
	state.LastNotificationID = strings.TrimSpace(state.LastNotificationID)
	state.FailedReason = strings.TrimSpace(state.FailedReason)
	// A zero timestamp carries no information; represent it as "unset".
	if state.ExpiresAt != nil && state.ExpiresAt.IsZero() {
		state.ExpiresAt = nil
	}
	if len(state.MissingInfo) > 0 {
		state.MissingInfo = dedupeAndTrimStrings(state.MissingInfo)
	}
	return state
}
// marshalActiveScheduleSessionState serializes a normalized state to JSON,
// guaranteeing a non-empty "{}" payload for the database column.
func marshalActiveScheduleSessionState(state model.ActiveScheduleSessionState) (string, error) {
	raw, err := json.Marshal(normalizeActiveScheduleSessionState(state))
	if err != nil {
		return "", err
	}
	if text := strings.TrimSpace(string(raw)); text != "" {
		return text, nil
	}
	return "{}", nil
}

// unmarshalActiveScheduleSessionState parses stored state_json; blank or "null"
// payloads decode to the zero state, and any decoded result is re-normalized.
func unmarshalActiveScheduleSessionState(raw string) (model.ActiveScheduleSessionState, error) {
	payload := strings.TrimSpace(raw)
	if payload == "" || payload == "null" {
		return model.ActiveScheduleSessionState{}, nil
	}
	var state model.ActiveScheduleSessionState
	if err := json.Unmarshal([]byte(payload), &state); err != nil {
		return model.ActiveScheduleSessionState{}, err
	}
	return normalizeActiveScheduleSessionState(state), nil
}
// activeScheduleSessionSnapshotFromRow converts a database row into the runtime
// snapshot, decoding state_json along the way.
func activeScheduleSessionSnapshotFromRow(row *model.ActiveScheduleSession) (*model.ActiveScheduleSessionSnapshot, error) {
	if row == nil {
		return nil, errors.New("active schedule session row is nil")
	}
	state, err := unmarshalActiveScheduleSessionState(row.StateJSON)
	if err != nil {
		return nil, fmt.Errorf("unmarshal active schedule session state failed: %w", err)
	}
	snapshot := &model.ActiveScheduleSessionSnapshot{
		SessionID:        row.SessionID,
		UserID:           row.UserID,
		ConversationID:   nullableStringValue(row.ConversationID),
		TriggerID:        row.TriggerID,
		CurrentPreviewID: nullableStringValue(row.CurrentPreviewID),
		Status:           row.Status,
		State:            state,
		CreatedAt:        row.CreatedAt,
		UpdatedAt:        row.UpdatedAt,
	}
	return snapshot, nil
}
// nullableStringPtr maps a string to a trimmed *string, with blank -> nil
// (so the database column stores NULL instead of "").
func nullableStringPtr(raw string) *string {
	trimmed := strings.TrimSpace(raw)
	if trimmed == "" {
		return nil
	}
	return &trimmed
}

// nullableStringValue is the inverse mapping: nil -> "", otherwise the trimmed value.
func nullableStringValue(raw *string) string {
	if raw == nil {
		return ""
	}
	return strings.TrimSpace(*raw)
}

// cloneUpdateMap shallow-copies an update map so the caller's map is never mutated;
// capacity is padded by one for the updated_at key commonly added afterwards.
func cloneUpdateMap(updates map[string]any) map[string]any {
	out := make(map[string]any, len(updates)+1)
	for key, value := range updates {
		out[key] = value
	}
	return out
}

// dedupeAndTrimStrings trims each entry, drops blanks and duplicates, and keeps
// first-seen order; an effectively-empty input returns nil.
func dedupeAndTrimStrings(values []string) []string {
	if len(values) == 0 {
		return nil
	}
	seen := make(map[string]struct{}, len(values))
	out := make([]string, 0, len(values))
	for _, raw := range values {
		item := strings.TrimSpace(raw)
		if item == "" {
			continue
		}
		if _, dup := seen[item]; dup {
			continue
		}
		seen[item] = struct{}{}
		out = append(out, item)
	}
	if len(out) == 0 {
		return nil
	}
	return out
}

View File

@@ -0,0 +1,226 @@
package dao
import (
"context"
"encoding/json"
"fmt"
"strconv"
"time"
"github.com/cloudwego/eino/schema"
"github.com/go-redis/redis/v8"
)
// AgentCache wraps the Redis-backed chat-history and conversation caches.
type AgentCache struct {
	client *redis.Client
	// Default window size (overridden by the per-session dynamic window).
	windowSize int
	// Expiration applied to every key this cache writes.
	expiration time.Duration
}

// Bounds for the per-session history window; requested sizes are clamped into this range.
const (
	minHistoryWindowSize = 16
	maxHistoryWindowSize = 4096
)

// NewAgentCache builds an AgentCache with a 128-message default window and a 1-hour TTL.
func NewAgentCache(client *redis.Client) *AgentCache {
	return &AgentCache{
		client:     client,
		windowSize: 128,
		expiration: 1 * time.Hour,
	}
}

// historyKey is the Redis list key holding a session's recent messages (newest at the head).
func (m *AgentCache) historyKey(sessionID string) string {
	return fmt.Sprintf("smartflow:history:%s", sessionID)
}

// historyWindowKey is the Redis key storing a session's window-size override.
func (m *AgentCache) historyWindowKey(sessionID string) string {
	return fmt.Sprintf("smartflow:history_window:%s", sessionID)
}

// normalizeWindowSize clamps a requested window into [minHistoryWindowSize, maxHistoryWindowSize].
func (m *AgentCache) normalizeWindowSize(size int) int {
	if size < minHistoryWindowSize {
		return minHistoryWindowSize
	}
	if size > maxHistoryWindowSize {
		return maxHistoryWindowSize
	}
	return size
}
// getSessionWindowSize resolves the effective history window for a session:
// the per-session override when present and parseable, otherwise the default.
func (m *AgentCache) getSessionWindowSize(ctx context.Context, sessionID string) (int, error) {
	raw, err := m.client.Get(ctx, m.historyWindowKey(sessionID)).Result()
	switch {
	case err == redis.Nil:
		return m.windowSize, nil
	case err != nil:
		return 0, err
	}
	parsed, parseErr := strconv.Atoi(raw)
	if parseErr != nil {
		// An unparseable override is ignored rather than surfaced: fall back to the default.
		return m.windowSize, nil
	}
	return m.normalizeWindowSize(parsed), nil
}

// SetSessionWindowSize stores the per-session window cap, clamped to the allowed range.
func (m *AgentCache) SetSessionWindowSize(ctx context.Context, sessionID string, size int) error {
	return m.client.Set(ctx, m.historyWindowKey(sessionID), m.normalizeWindowSize(size), m.expiration).Err()
}
// EnforceHistoryWindow trims the session's history list down to its current window
// and renews the key's TTL in the same pipeline round trip.
func (m *AgentCache) EnforceHistoryWindow(ctx context.Context, sessionID string) error {
	limit, err := m.getSessionWindowSize(ctx, sessionID)
	if err != nil {
		return err
	}
	listKey := m.historyKey(sessionID)
	pipe := m.client.Pipeline()
	pipe.LTrim(ctx, listKey, 0, int64(limit-1))
	pipe.Expire(ctx, listKey, m.expiration)
	_, err = pipe.Exec(ctx)
	return err
}
// PushMessage prepends one message to the session's history list, trims the list
// to the session window, and renews the TTL.
func (m *AgentCache) PushMessage(ctx context.Context, sessionID string, msg *schema.Message) error {
	key := m.historyKey(sessionID)
	size, err := m.getSessionWindowSize(ctx, sessionID)
	if err != nil {
		return err
	}
	// 1. Serialize the Eino message.
	data, err := json.Marshal(msg)
	if err != nil {
		return fmt.Errorf("marshal message failed: %w", err)
	}
	// 2. Batch push + trim + expire in one pipeline round trip.
	//    NOTE(review): a pipeline only batches commands; it is not atomic (no
	//    MULTI/EXEC), so commands from concurrent writers may interleave.
	pipe := m.client.Pipeline()
	pipe.LPush(ctx, key, data)
	pipe.LTrim(ctx, key, 0, int64(size-1))
	pipe.Expire(ctx, key, m.expiration)
	_, err = pipe.Exec(ctx)
	return err
}
// GetHistory loads the full cached history for a session in chronological order.
// A missing or empty list yields (nil, nil).
func (m *AgentCache) GetHistory(ctx context.Context, sessionID string) ([]*schema.Message, error) {
	raw, err := m.client.LRange(ctx, m.historyKey(sessionID), 0, -1).Result()
	if err != nil {
		return nil, err
	}
	if len(raw) == 0 {
		return nil, nil
	}
	// LRANGE yields [newest ... oldest]; fill the result back-to-front so it
	// comes out [oldest ... newest] without a second reversal pass.
	out := make([]*schema.Message, len(raw))
	for idx, item := range raw {
		decoded := new(schema.Message)
		if err := json.Unmarshal([]byte(item), decoded); err != nil {
			return nil, err
		}
		out[len(raw)-1-idx] = decoded
	}
	return out, nil
}
// BackfillHistory repopulates the Redis history list in one shot after a cache miss.
//
// The variadic LPush leaves the last element of `values` at the list head, so
// passing messages oldest-first yields the newest-first layout GetHistory expects.
// An empty input simply deletes the key.
func (m *AgentCache) BackfillHistory(ctx context.Context, sessionID string, messages []*schema.Message) error {
	key := m.historyKey(sessionID)
	size, err := m.getSessionWindowSize(ctx, sessionID)
	if err != nil {
		return err
	}
	if len(messages) == 0 {
		return m.client.Del(ctx, key).Err()
	}
	values := make([]interface{}, len(messages))
	for i, msg := range messages {
		data, err := json.Marshal(msg)
		if err != nil {
			return fmt.Errorf("marshal failed at index %d: %w", i, err)
		}
		values[i] = data
	}
	// Replace the whole list, trim to the window, and renew the TTL in one pipeline.
	pipe := m.client.Pipeline()
	pipe.Del(ctx, key)
	pipe.LPush(ctx, key, values...)
	pipe.LTrim(ctx, key, 0, int64(size-1))
	pipe.Expire(ctx, key, m.expiration)
	_, err = pipe.Exec(ctx)
	return err
}
// ClearHistory drops both the history list and the window override for a session.
func (m *AgentCache) ClearHistory(ctx context.Context, sessionID string) error {
	return m.client.Del(ctx, m.historyKey(sessionID), m.historyWindowKey(sessionID)).Err()
}

// conversationStatusKey is the Redis key marking that a conversation is known to exist.
func (m *AgentCache) conversationStatusKey(sessionID string) string {
	return fmt.Sprintf("smartflow:conversation_status:%s", sessionID)
}

// GetConversationStatus reports whether the existence marker is present.
func (m *AgentCache) GetConversationStatus(ctx context.Context, sessionID string) (bool, error) {
	count, err := m.client.Exists(ctx, m.conversationStatusKey(sessionID)).Result()
	if err != nil {
		return false, err
	}
	return count == 1, nil
}

// SetConversationStatus writes the existence marker.
// SetNX makes the write a no-op when the marker already exists.
func (m *AgentCache) SetConversationStatus(ctx context.Context, sessionID string) error {
	return m.client.SetNX(ctx, m.conversationStatusKey(sessionID), 1, m.expiration).Err()
}

// DeleteConversationStatus removes the existence marker.
func (m *AgentCache) DeleteConversationStatus(ctx context.Context, sessionID string) error {
	return m.client.Del(ctx, m.conversationStatusKey(sessionID)).Err()
}
// ---- Compaction cache ----

// compactionKey is the Redis key holding a chat's compaction summary + watermark.
func (m *AgentCache) compactionKey(chatID string) string {
	return fmt.Sprintf("smartflow:compaction:%s", chatID)
}

// SaveCompactionCache stores the compaction summary and watermark in Redis.
//
// Fix: the json.Marshal error was previously discarded with `_`. Marshaling this
// string/int map cannot realistically fail today, but the error is now propagated
// so a future payload change cannot silently cache garbage.
func (m *AgentCache) SaveCompactionCache(ctx context.Context, chatID string, summary string, watermark int) error {
	data, err := json.Marshal(map[string]any{
		"summary":   summary,
		"watermark": watermark,
	})
	if err != nil {
		return fmt.Errorf("marshal compaction cache failed: %w", err)
	}
	return m.client.Set(ctx, m.compactionKey(chatID), data, m.expiration).Err()
}

// LoadCompactionCache reads the cached compaction summary.
//
// Returns ok=false without an error both on a missing key and on an undecodable
// payload: a corrupt cache entry is deliberately treated as a miss, not a failure.
func (m *AgentCache) LoadCompactionCache(ctx context.Context, chatID string) (summary string, watermark int, ok bool, err error) {
	val, err := m.client.Get(ctx, m.compactionKey(chatID)).Result()
	if err != nil {
		if err == redis.Nil {
			return "", 0, false, nil
		}
		return "", 0, false, err
	}
	var data struct {
		Summary   string `json:"summary"`
		Watermark int    `json:"watermark"`
	}
	if jsonErr := json.Unmarshal([]byte(val), &data); jsonErr != nil {
		return "", 0, false, nil
	}
	return data.Summary, data.Watermark, true, nil
}

View File

@@ -0,0 +1,483 @@
package dao
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// AgentDAO owns database access for agent chats and their message history.
type AgentDAO struct {
	db *gorm.DB
}

// NewAgentDAO creates an AgentDAO bound to the given database handle.
func NewAgentDAO(db *gorm.DB) *AgentDAO {
	return &AgentDAO{db: db}
}

// WithTx returns a DAO bound to the caller's transaction handle.
//
// Fix: the receiver was named `r` here while every other AgentDAO method uses `a`;
// renamed for per-type receiver-name consistency.
func (a *AgentDAO) WithTx(tx *gorm.DB) *AgentDAO {
	return &AgentDAO{db: tx}
}
// saveChatHistoryCore is the core implementation of "persist a chat message +
// update conversation statistics".
//
// Responsibility boundary:
//  1. Only performs database writes on the current DAO handle;
//  2. Never opens a transaction itself (that is the caller's decision);
//  3. Keeps chat_histories and agent_chats.message_count on a consistent footing.
//
// Failure handling:
//  1. Any failing step returns an error;
//  2. If the caller is inside a transaction, the returned error triggers a rollback.
//
// About the retry columns:
//  1. The retry mechanism has been removed entirely; this function no longer writes
//     the retry_group_id / retry_index / retry_from_* columns;
//  2. Those nullable columns are still present on the GORM ChatHistory model for
//     now, and historical data is unaffected;
//  3. Step B will DROP COLUMN them in a migration.
func (a *AgentDAO) saveChatHistoryCore(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int, sourceEventID string) error {
	// 0. Input hygiene before persisting: clamp negative token counts to zero so
	//    bad values never pollute the cumulative statistics.
	if tokensConsumed < 0 {
		tokensConsumed = 0
	}
	reasoningContent = strings.TrimSpace(reasoningContent)
	if reasoningDurationSeconds < 0 {
		reasoningDurationSeconds = 0
	}
	normalizedEventID := strings.TrimSpace(sourceEventID)
	var normalizedEventIDPtr *string
	if normalizedEventID != "" {
		normalizedEventIDPtr = &normalizedEventID
		// Idempotency guard: lock the conversation row (SELECT ... FOR UPDATE) and
		// skip the whole write when this event id was already applied.
		var chat model.AgentChat
		err := a.db.WithContext(ctx).
			Clauses(clause.Locking{Strength: "UPDATE"}).
			Select("last_history_event_id").
			Where("user_id = ? AND chat_id = ?", userID, conversationID).
			First(&chat).Error
		if err != nil {
			return err
		}
		if chat.LastHistoryEventID != nil && strings.TrimSpace(*chat.LastHistoryEventID) == normalizedEventID {
			return nil
		}
	}
	// 1. Write the raw message into chat_histories first.
	var reasoningContentPtr *string
	if reasoningContent != "" {
		reasoningContentPtr = &reasoningContent
	}
	userChat := model.ChatHistory{
		SourceEventID:            normalizedEventIDPtr,
		UserID:                   userID,
		MessageContent:           &message,
		ReasoningContent:         reasoningContentPtr,
		ReasoningDurationSeconds: reasoningDurationSeconds,
		Role:                     &role,
		ChatID:                   conversationID,
		TokensConsumed:           tokensConsumed,
	}
	if err := a.db.WithContext(ctx).Create(&userChat).Error; err != nil {
		return err
	}
	// 2. Then advance the conversation stats so message_count / tokens_total /
	//    last_message_at move together.
	now := time.Now()
	updates := map[string]interface{}{
		"message_count":   gorm.Expr("message_count + ?", 1),
		"tokens_total":    gorm.Expr("tokens_total + ?", tokensConsumed),
		"last_message_at": &now,
	}
	if normalizedEventIDPtr != nil {
		updates["last_history_event_id"] = normalizedEventIDPtr
	}
	result := a.db.WithContext(ctx).Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ?", userID, conversationID).
		Updates(updates)
	if result.Error != nil {
		return result.Error
	}
	// Zero rows means the conversation row is missing — surface it explicitly.
	if result.RowsAffected == 0 {
		return fmt.Errorf("conversation not found when updating stats: user_id=%d chat_id=%s", userID, conversationID)
	}
	return nil
}
// SaveChatHistoryInTx writes chat history when the caller has already opened a transaction.
//
// Design intent:
//  1. Lets the service layer compose multiple DAO operations without nested transactions;
//  2. Lets the outbox consumer share the same tx as the business write.
func (a *AgentDAO) SaveChatHistoryInTx(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int, sourceEventID string) error {
	return a.saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, tokensConsumed, sourceEventID)
}

// SaveChatHistory writes chat history on the synchronous direct-write path.
//
// Notes:
//  1. This method opens its own transaction;
//  2. It reuses saveChatHistoryCore so the semantics match SaveChatHistoryInTx exactly.
func (a *AgentDAO) SaveChatHistory(ctx context.Context, userID int, conversationID string, role, message, reasoningContent string, reasoningDurationSeconds int, tokensConsumed int, sourceEventID string) error {
	return a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		return a.WithTx(tx).saveChatHistoryCore(ctx, userID, conversationID, role, message, reasoningContent, reasoningDurationSeconds, tokensConsumed, sourceEventID)
	})
}
// adjustTokenUsageCore applies an incremental adjustment to the conversation
// token ledger under the caller's transaction semantics.
//
// Responsibility boundary:
//  1. Only updates agent_chats.tokens_total;
//  2. Never writes chat_histories (message persistence goes through SaveChatHistory*);
//  3. deltaTokens <= 0 is treated as a no-op and returns immediately.
func (a *AgentDAO) adjustTokenUsageCore(ctx context.Context, userID int, conversationID string, deltaTokens int, eventID string) error {
	if deltaTokens <= 0 {
		return nil
	}
	normalizedEventID := strings.TrimSpace(eventID)
	var normalizedEventIDPtr *string
	if normalizedEventID != "" {
		normalizedEventIDPtr = &normalizedEventID
		// Idempotency guard: lock the row and skip when this event was already applied.
		var chat model.AgentChat
		err := a.db.WithContext(ctx).
			Clauses(clause.Locking{Strength: "UPDATE"}).
			Select("last_token_adjust_event_id").
			Where("user_id = ? AND chat_id = ?", userID, conversationID).
			First(&chat).Error
		if err != nil {
			return err
		}
		if chat.LastTokenAdjustEventID != nil && strings.TrimSpace(*chat.LastTokenAdjustEventID) == normalizedEventID {
			return nil
		}
	}
	chatUpdate := a.db.WithContext(ctx).
		Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ?", userID, conversationID).
		Updates(map[string]interface{}{
			"tokens_total":               gorm.Expr("tokens_total + ?", deltaTokens),
			"last_token_adjust_event_id": normalizedEventIDPtr,
		})
	if chatUpdate.Error != nil {
		return chatUpdate.Error
	}
	// Zero rows means the conversation row is missing — surface it explicitly.
	if chatUpdate.RowsAffected == 0 {
		return fmt.Errorf("conversation not found when adjusting tokens: user_id=%d chat_id=%s", userID, conversationID)
	}
	return nil
}

// AdjustTokenUsageInTx applies the token-ledger adjustment inside the caller's open transaction.
func (a *AgentDAO) AdjustTokenUsageInTx(ctx context.Context, userID int, conversationID string, deltaTokens int, eventID string) error {
	return a.adjustTokenUsageCore(ctx, userID, conversationID, deltaTokens, eventID)
}

// AdjustTokenUsage applies the adjustment on the synchronous path (opens its own transaction).
func (a *AgentDAO) AdjustTokenUsage(ctx context.Context, userID int, conversationID string, deltaTokens int, eventID string) error {
	return a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		return a.WithTx(tx).adjustTokenUsageCore(ctx, userID, conversationID, deltaTokens, eventID)
	})
}
// CreateNewChat inserts an empty conversation row and returns its auto-increment id.
//
// NOTE(review): unlike the other methods this one takes no context, so the write
// cannot be cancelled — confirm callers are fine with that before threading one through.
func (a *AgentDAO) CreateNewChat(userID int, chatID string) (int64, error) {
	record := model.AgentChat{
		ChatID:        chatID,
		UserID:        userID,
		MessageCount:  0,
		LastMessageAt: nil,
	}
	if err := a.db.Create(&record).Error; err != nil {
		return 0, err
	}
	return record.ID, nil
}
// GetUserChatHistories returns the most recent `limit` messages of one conversation
// in chronological (oldest-first) order.
func (a *AgentDAO) GetUserChatHistories(ctx context.Context, userID, limit int, chatID string) ([]model.ChatHistory, error) {
	var rows []model.ChatHistory
	err := a.db.WithContext(ctx).
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		Order("created_at desc").
		Limit(limit).
		Find(&rows).Error
	if err != nil {
		return nil, err
	}
	// The query selected the "newest N" in reverse order; flip the slice so the
	// model consumes messages oldest-first.
	left, right := 0, len(rows)-1
	for left < right {
		rows[left], rows[right] = rows[right], rows[left]
		left++
		right--
	}
	return rows, nil
}
// IfChatExists reports whether a conversation row exists for (user_id, chat_id).
//
// Improvement: uses COUNT instead of loading the whole row with First, which
// avoids materializing the record and removes the ErrRecordNotFound special case.
func (a *AgentDAO) IfChatExists(ctx context.Context, userID int, chatID string) (bool, error) {
	var count int64
	err := a.db.WithContext(ctx).
		Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		Count(&count).Error
	if err != nil {
		return false, err
	}
	return count > 0, nil
}
// GetConversationMeta fetches one conversation's metadata columns.
func (a *AgentDAO) GetConversationMeta(ctx context.Context, userID int, chatID string) (*model.AgentChat, error) {
	chat := new(model.AgentChat)
	err := a.db.WithContext(ctx).
		Select("chat_id", "title", "message_count", "last_message_at", "status").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(chat).Error
	if err != nil {
		return nil, err
	}
	return chat, nil
}
// GetConversationTitle reads the current conversation title.
// exists=false means the conversation row itself is missing (not an error).
func (a *AgentDAO) GetConversationTitle(ctx context.Context, userID int, chatID string) (title string, exists bool, err error) {
	var chat model.AgentChat
	lookupErr := a.db.WithContext(ctx).
		Select("title").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(&chat).Error
	switch {
	case errors.Is(lookupErr, gorm.ErrRecordNotFound):
		return "", false, nil
	case lookupErr != nil:
		return "", false, lookupErr
	case chat.Title == nil:
		return "", true, nil
	}
	return strings.TrimSpace(*chat.Title), true, nil
}
// UpdateConversationTitleIfEmpty sets the conversation title only when no title is
// stored yet (NULL or empty string). A blank candidate title is ignored entirely.
func (a *AgentDAO) UpdateConversationTitleIfEmpty(ctx context.Context, userID int, chatID, title string) error {
	candidate := strings.TrimSpace(title)
	if candidate == "" {
		return nil
	}
	return a.db.WithContext(ctx).
		Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ? AND (title IS NULL OR title = '')", userID, chatID).
		Update("title", candidate).Error
}
// GetConversationList pages through one user's conversations.
//
// Responsibility boundary:
//  1. Reads the database only — caching lives elsewhere;
//  2. Enforces user_id isolation only — parameter validation is the service's job;
//  3. Returns total so the caller can compute has_more.
func (a *AgentDAO) GetConversationList(ctx context.Context, userID, page, pageSize int, status string) ([]model.AgentChat, int64, error) {
	// Shared filter builder keeps the COUNT and the page query on one predicate,
	// so both statistics use the same scope.
	filtered := func() *gorm.DB {
		tx := a.db.WithContext(ctx).Model(&model.AgentChat{}).Where("user_id = ?", userID)
		if strings.TrimSpace(status) != "" {
			tx = tx.Where("status = ?", status)
		}
		return tx
	}
	// Total first, so the pagination metadata for the frontend is always complete.
	var total int64
	if err := filtered().Count(&total).Error; err != nil {
		return nil, 0, err
	}
	if total == 0 {
		return make([]model.AgentChat, 0), 0, nil
	}
	// Page query: most recently active first; id DESC breaks timestamp ties so
	// pages do not shuffle between requests.
	var chats []model.AgentChat
	err := filtered().
		Select("id", "chat_id", "title", "message_count", "last_message_at", "status", "created_at").
		Order("last_message_at DESC").
		Order("id DESC").
		Offset((page - 1) * pageSize).
		Limit(pageSize).
		Find(&chats).Error
	if err != nil {
		return nil, 0, err
	}
	return chats, total, nil
}
// ---- Compaction summary persistence ----
//
//  1. The legacy SaveCompaction / LoadCompaction entry points are kept and default
//     to reading/writing the execute stage only.
//  2. The stage-keyed entry points bucket by stageKey; data still lives in
//     agent_chats.compaction_summary.
//  3. For backward compatibility, a legacy plain-string compaction_summary is
//     transparently read as execute-stage data.

// SaveCompaction persists the execute-stage compaction summary (legacy entry point).
func (a *AgentDAO) SaveCompaction(ctx context.Context, userID int, chatID string, summary string, watermark int) error {
	return a.SaveStageCompaction(ctx, userID, chatID, "execute", summary, watermark)
}

// LoadCompaction loads the execute-stage compaction summary (legacy entry point).
func (a *AgentDAO) LoadCompaction(ctx context.Context, userID int, chatID string) (summary string, watermark int, err error) {
	return a.LoadStageCompaction(ctx, userID, chatID, "execute")
}
// SaveContextTokenStats persists the context-window token distribution snapshot.
func (a *AgentDAO) SaveContextTokenStats(ctx context.Context, userID int, chatID string, statsJSON string) error {
	return a.db.WithContext(ctx).
		Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		Update("context_token_stats", statsJSON).Error
}

// LoadContextTokenStats reads the context-window token distribution snapshot.
// A NULL column reads back as the empty string.
func (a *AgentDAO) LoadContextTokenStats(ctx context.Context, userID int, chatID string) (string, error) {
	var chat model.AgentChat
	if err := a.db.WithContext(ctx).
		Select("context_token_stats").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(&chat).Error; err != nil {
		return "", err
	}
	if chat.ContextTokenStats == nil {
		return "", nil
	}
	return *chat.ContextTokenStats, nil
}
// stageCompactionRecord is one stage's compaction payload: the summary text
// plus the history watermark up to which it was computed.
type stageCompactionRecord struct {
	Summary   string `json:"summary"`
	Watermark int    `json:"watermark"`
}

// stageCompactionEnvelope is the versioned JSON envelope stored in
// agent_chats.compaction_summary, bucketing records by stage key.
type stageCompactionEnvelope struct {
	Version int                              `json:"version"`
	Stages  map[string]stageCompactionRecord `json:"stages"`
}
// normalizeCompactionStageKey canonicalizes a stage key so variants such as
// "Execute" and "execute" map to the same bucket; a blank key defaults to
// "execute".
func normalizeCompactionStageKey(stageKey string) string {
	normalized := strings.TrimSpace(strings.ToLower(stageKey))
	if normalized != "" {
		return normalized
	}
	return "execute"
}
// loadStageCompactionStages unpacks the stored compaction summary into a
// stage -> record map.
//
// Decoding order:
//  1. nil / blank input yields an empty map;
//  2. a JSON envelope carrying at least one stage is read stage by stage,
//     with keys normalized so casing variants collapse;
//  3. anything else is treated as the legacy plain-string format and mapped
//     to the "execute" stage with the column-level watermark.
func loadStageCompactionStages(summary *string, watermark int) map[string]stageCompactionRecord {
	out := make(map[string]stageCompactionRecord)
	if summary == nil {
		return out
	}
	trimmed := strings.TrimSpace(*summary)
	if trimmed == "" {
		return out
	}
	var envelope stageCompactionEnvelope
	if decodeErr := json.Unmarshal([]byte(trimmed), &envelope); decodeErr == nil && len(envelope.Stages) > 0 {
		for stageKey, rec := range envelope.Stages {
			out[normalizeCompactionStageKey(stageKey)] = stageCompactionRecord{
				Summary:   strings.TrimSpace(rec.Summary),
				Watermark: rec.Watermark,
			}
		}
		return out
	}
	// Legacy fallback: the whole column is one execute-stage summary string.
	out["execute"] = stageCompactionRecord{Summary: trimmed, Watermark: watermark}
	return out
}
// marshalStageCompactionStages re-encodes the per-stage summary map into the
// versioned JSON envelope persisted in agent_chats.compaction_summary.
func marshalStageCompactionStages(stages map[string]stageCompactionRecord) (string, error) {
	env := stageCompactionEnvelope{
		Version: 1, // current envelope schema version
		Stages:  stages,
	}
	data, err := json.Marshal(env)
	if err != nil {
		return "", err
	}
	return string(data), nil
}
// LoadStageCompaction reads the compaction summary and watermark for one stage.
//
// Semantics:
//  1. the stage key is normalized before lookup;
//  2. a chat row with no data for that stage yields ("", 0, nil);
//  3. database errors are returned unchanged (including the not-found error
//     when the chat row itself does not exist).
func (a *AgentDAO) LoadStageCompaction(ctx context.Context, userID int, chatID string, stageKey string) (summary string, watermark int, err error) {
	stageKey = normalizeCompactionStageKey(stageKey)
	var chat model.AgentChat
	err = a.db.WithContext(ctx).
		Select("compaction_summary", "compaction_watermark").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(&chat).Error
	if err != nil {
		return "", 0, err
	}
	stages := loadStageCompactionStages(chat.CompactionSummary, chat.CompactionWatermark)
	if record, ok := stages[stageKey]; ok {
		return record.Summary, record.Watermark, nil
	}
	return "", 0, nil
}
// SaveStageCompaction saves the compaction summary and watermark for one stage.
//
//  1. Read the existing envelope first so buckets written by other stages are
//     not clobbered.
//  2. Replace only the bucket for the current (normalized) stage key.
//  3. Write the whole JSON envelope back, mirroring the execute-stage
//     watermark into the legacy compaction_watermark column for compatibility.
func (a *AgentDAO) SaveStageCompaction(ctx context.Context, userID int, chatID string, stageKey string, summary string, watermark int) error {
	stageKey = normalizeCompactionStageKey(stageKey)
	var chat model.AgentChat
	err := a.db.WithContext(ctx).
		Select("compaction_summary", "compaction_watermark").
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		First(&chat).Error
	if err != nil {
		return err
	}
	stages := loadStageCompactionStages(chat.CompactionSummary, chat.CompactionWatermark)
	stages[stageKey] = stageCompactionRecord{
		Summary:   strings.TrimSpace(summary),
		Watermark: watermark,
	}
	payload, err := marshalStageCompactionStages(stages)
	if err != nil {
		return err
	}
	// Keep the standalone watermark column tracking the execute stage, the
	// only stage legacy readers understand.
	legacyWatermark := watermark
	if executeRecord, ok := stages["execute"]; ok {
		legacyWatermark = executeRecord.Watermark
	}
	return a.db.WithContext(ctx).
		Model(&model.AgentChat{}).
		Where("user_id = ? AND chat_id = ?", userID, chatID).
		Updates(map[string]any{
			"compaction_summary":   payload,
			"compaction_watermark": legacyWatermark,
		}).Error
}

View File

@@ -0,0 +1,252 @@
package dao
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
// UpsertScheduleStateSnapshot writes/overwrites the schedule state snapshot
// keyed by "user_id + conversation_id".
//
// Responsibility boundary:
//  1. serializes the strongly-typed snapshot and persists it to agent_schedule_states;
//  2. performs an upsert (same conversation overwrites) and bumps revision by 1 on conflict;
//  3. does NOT touch the Redis cache, business routing, or the final schedule tables.
//
// Steps:
//  1. validate the primary-key fields first so a dirty snapshot never reaches the database;
//  2. serialize the slice fields to JSON so the table layout stays stable;
//  3. run the OnConflict upsert:
//     3.1 a new record is inserted directly;
//     3.2 an existing record has its business columns overwritten and revision incremented;
//     3.3 any failure is returned as an error and the caller decides whether to degrade.
func (a *AgentDAO) UpsertScheduleStateSnapshot(ctx context.Context, snapshot *model.SchedulePlanStateSnapshot) error {
	if a == nil || a.db == nil {
		return errors.New("agent dao is not initialized")
	}
	if snapshot == nil {
		return errors.New("schedule state snapshot is nil")
	}
	if snapshot.UserID <= 0 {
		return fmt.Errorf("invalid snapshot user_id: %d", snapshot.UserID)
	}
	conversationID := strings.TrimSpace(snapshot.ConversationID)
	if conversationID == "" {
		return errors.New("schedule state snapshot conversation_id is empty")
	}
	// nil slices are stored as "[]" so the column always holds valid JSON.
	taskClassIDsJSON, err := marshalJSONOrDefault(snapshot.TaskClassIDs, "[]")
	if err != nil {
		return fmt.Errorf("marshal task_class_ids failed: %w", err)
	}
	constraintsJSON, err := marshalJSONOrDefault(snapshot.Constraints, "[]")
	if err != nil {
		return fmt.Errorf("marshal constraints failed: %w", err)
	}
	hybridEntriesJSON, err := marshalJSONOrDefault(snapshot.HybridEntries, "[]")
	if err != nil {
		return fmt.Errorf("marshal hybrid_entries failed: %w", err)
	}
	allocatedItemsJSON, err := marshalJSONOrDefault(snapshot.AllocatedItems, "[]")
	if err != nil {
		return fmt.Errorf("marshal allocated_items failed: %w", err)
	}
	candidatePlansJSON, err := marshalJSONOrDefault(snapshot.CandidatePlans, "[]")
	if err != nil {
		return fmt.Errorf("marshal candidate_plans failed: %w", err)
	}
	stateVersion := snapshot.StateVersion
	if stateVersion <= 0 {
		stateVersion = model.SchedulePlanStateVersionV1
	}
	revision := snapshot.Revision
	if revision <= 0 {
		revision = 1
	}
	row := model.AgentScheduleState{
		UserID:             snapshot.UserID,
		ConversationID:     conversationID,
		Revision:           revision,
		StateVersion:       stateVersion,
		TaskClassIDsJSON:   taskClassIDsJSON,
		ConstraintsJSON:    constraintsJSON,
		HybridEntriesJSON:  hybridEntriesJSON,
		AllocatedItemsJSON: allocatedItemsJSON,
		CandidatePlansJSON: candidatePlansJSON,
		UserIntent:         strings.TrimSpace(snapshot.UserIntent),
		Strategy:           normalizeStrategy(snapshot.Strategy),
		AdjustmentScope:    normalizeAdjustmentScope(snapshot.AdjustmentScope),
		RestartRequested:   snapshot.RestartRequested,
		FinalSummary:       strings.TrimSpace(snapshot.FinalSummary),
		Completed:          snapshot.Completed,
		TraceID:            strings.TrimSpace(snapshot.TraceID),
	}
	now := time.Now()
	return a.db.WithContext(ctx).Clauses(clause.OnConflict{
		Columns: []clause.Column{
			{Name: "user_id"},
			{Name: "conversation_id"},
		},
		// On conflict: revision is incremented server-side; all business
		// columns are overwritten from the new snapshot.
		DoUpdates: clause.Assignments(map[string]any{
			"revision":          gorm.Expr("revision + 1"),
			"state_version":     row.StateVersion,
			"task_class_ids":    row.TaskClassIDsJSON,
			"constraints":       row.ConstraintsJSON,
			"hybrid_entries":    row.HybridEntriesJSON,
			"allocated_items":   row.AllocatedItemsJSON,
			"candidate_plans":   row.CandidatePlansJSON,
			"user_intent":       row.UserIntent,
			"strategy":          row.Strategy,
			"adjustment_scope":  row.AdjustmentScope,
			"restart_requested": row.RestartRequested,
			"final_summary":     row.FinalSummary,
			"completed":         row.Completed,
			"trace_id":          row.TraceID,
			"updated_at":        now,
		}),
	}).Create(&row).Error
}
// GetScheduleStateSnapshot reads the schedule state snapshot for one conversation.
//
// Responsibility boundary:
//  1. queries by user_id + conversation_id;
//  2. deserializes the JSON columns back into strongly-typed structures;
//  3. does NOT backfill Redis and does NOT make business routing decisions.
//
// Return semantics:
//  1. hit: (snapshot, nil);
//  2. miss: (nil, nil) — callers may continue with other fallbacks;
//  3. deserialization failure: error, meaning the stored row is invalid and
//     needs investigation.
func (a *AgentDAO) GetScheduleStateSnapshot(ctx context.Context, userID int, conversationID string) (*model.SchedulePlanStateSnapshot, error) {
	if a == nil || a.db == nil {
		return nil, errors.New("agent dao is not initialized")
	}
	if userID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return nil, errors.New("conversation_id is empty")
	}
	var row model.AgentScheduleState
	err := a.db.WithContext(ctx).
		Where("user_id = ? AND conversation_id = ?", userID, normalizedConversationID).
		First(&row).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil // miss is not an error
		}
		return nil, err
	}
	taskClassIDs := make([]int, 0)
	if err = unmarshalJSONOrDefault(row.TaskClassIDsJSON, &taskClassIDs, []int{}); err != nil {
		return nil, fmt.Errorf("unmarshal task_class_ids failed: %w", err)
	}
	constraints := make([]string, 0)
	if err = unmarshalJSONOrDefault(row.ConstraintsJSON, &constraints, []string{}); err != nil {
		return nil, fmt.Errorf("unmarshal constraints failed: %w", err)
	}
	hybridEntries := make([]model.HybridScheduleEntry, 0)
	if err = unmarshalJSONOrDefault(row.HybridEntriesJSON, &hybridEntries, []model.HybridScheduleEntry{}); err != nil {
		return nil, fmt.Errorf("unmarshal hybrid_entries failed: %w", err)
	}
	allocatedItems := make([]model.TaskClassItem, 0)
	if err = unmarshalJSONOrDefault(row.AllocatedItemsJSON, &allocatedItems, []model.TaskClassItem{}); err != nil {
		return nil, fmt.Errorf("unmarshal allocated_items failed: %w", err)
	}
	candidatePlans := make([]model.UserWeekSchedule, 0)
	if err = unmarshalJSONOrDefault(row.CandidatePlansJSON, &candidatePlans, []model.UserWeekSchedule{}); err != nil {
		return nil, fmt.Errorf("unmarshal candidate_plans failed: %w", err)
	}
	return &model.SchedulePlanStateSnapshot{
		UserID:           row.UserID,
		ConversationID:   row.ConversationID,
		Revision:         row.Revision,
		StateVersion:     row.StateVersion,
		TaskClassIDs:     taskClassIDs,
		Constraints:      constraints,
		HybridEntries:    hybridEntries,
		AllocatedItems:   allocatedItems,
		CandidatePlans:   candidatePlans,
		UserIntent:       row.UserIntent,
		Strategy:         normalizeStrategy(row.Strategy),
		AdjustmentScope:  normalizeAdjustmentScope(row.AdjustmentScope),
		RestartRequested: row.RestartRequested,
		FinalSummary:     row.FinalSummary,
		Completed:        row.Completed,
		TraceID:          row.TraceID,
		UpdatedAt:        row.UpdatedAt,
	}, nil
}
// marshalJSONOrDefault serializes v to a JSON string, falling back to
// defaultJSON when v is nil or encodes to blank/"null", so every column
// written through it carries a deterministic, valid JSON value.
// Encoding failures are propagated so a half-formed snapshot is never stored.
func marshalJSONOrDefault(v any, defaultJSON string) (string, error) {
	if v == nil {
		return defaultJSON, nil
	}
	encoded, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	switch trimmed := strings.TrimSpace(string(encoded)); trimmed {
	case "", "null":
		return defaultJSON, nil
	default:
		return trimmed, nil
	}
}
// unmarshalJSONOrDefault decodes raw JSON into target, substituting
// defaultValue when raw is blank or the literal "null", so callers always
// observe a well-defined value without repeating emptiness checks.
// Decode errors are propagated to surface legacy dirty data.
func unmarshalJSONOrDefault[T any](raw string, target *T, defaultValue T) error {
	switch trimmed := strings.TrimSpace(raw); trimmed {
	case "", "null":
		*target = defaultValue
		return nil
	default:
		return json.Unmarshal([]byte(trimmed), target)
	}
}
// normalizeStrategy canonicalizes the snapshot strategy field: only "rapid"
// (case-insensitive, surrounding whitespace ignored) is kept; every other
// value collapses to the default "steady".
func normalizeStrategy(raw string) string {
	if strings.ToLower(strings.TrimSpace(raw)) == "rapid" {
		return "rapid"
	}
	return "steady"
}
// normalizeAdjustmentScope canonicalizes the fine-tuning scope field:
// "small" and "medium" (case-insensitive, trimmed) pass through; any other
// value defaults to "large".
func normalizeAdjustmentScope(raw string) string {
	scope := strings.ToLower(strings.TrimSpace(raw))
	if scope == "small" || scope == "medium" {
		return scope
	}
	return "large"
}

View File

@@ -0,0 +1,53 @@
package dao
import (
"context"
"errors"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
)
// AgentStateStoreAdapter adapts CacheDAO to the agent's AgentStateStore interface.
//
// Responsibility boundary:
//  1. CacheDAO.LoadAgentState uses an out-parameter; it is adapted to a return value;
//  2. CacheDAO.SaveAgentState accepts any; it is adapted to *AgentStateSnapshot;
//  3. DeleteAgentState already matches the target signature and is forwarded directly.
type AgentStateStoreAdapter struct {
	cache *CacheDAO
}

// NewAgentStateStoreAdapter creates the adapter around an existing CacheDAO.
func NewAgentStateStoreAdapter(cache *CacheDAO) *AgentStateStoreAdapter {
	return &AgentStateStoreAdapter{cache: cache}
}

// Save serializes and persists an agent state snapshot for the conversation.
func (a *AgentStateStoreAdapter) Save(ctx context.Context, conversationID string, snapshot *agentmodel.AgentStateSnapshot) error {
	if a == nil || a.cache == nil {
		return errors.New("agent state store adapter is not initialized")
	}
	return a.cache.SaveAgentState(ctx, conversationID, snapshot)
}

// Load reads and deserializes an agent state snapshot.
// The boolean reports whether a snapshot existed; (nil, false, nil) is a miss.
func (a *AgentStateStoreAdapter) Load(ctx context.Context, conversationID string) (*agentmodel.AgentStateSnapshot, bool, error) {
	if a == nil || a.cache == nil {
		return nil, false, errors.New("agent state store adapter is not initialized")
	}
	var snapshot agentmodel.AgentStateSnapshot
	ok, err := a.cache.LoadAgentState(ctx, conversationID, &snapshot)
	if err != nil || !ok {
		return nil, ok, err
	}
	return &snapshot, true, nil
}

// Delete removes the agent state snapshot for the conversation.
func (a *AgentStateStoreAdapter) Delete(ctx context.Context, conversationID string) error {
	if a == nil || a.cache == nil {
		return errors.New("agent state store adapter is not initialized")
	}
	return a.cache.DeleteAgentState(ctx, conversationID)
}

View File

@@ -0,0 +1,86 @@
package dao
import (
"context"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
)
// SaveConversationTimelineEvent persists one conversation timeline event to MySQL.
//
// Responsibility boundary:
//  1. single-row insert only; seq allocation belongs to the caller;
//  2. fields are normalized (trimmed, empty mapped to nil) with no business rewriting;
//  3. the error is returned so the caller decides whether to abort the flow.
func (a *AgentDAO) SaveConversationTimelineEvent(ctx context.Context, payload model.ChatTimelinePersistPayload) (int64, *time.Time, error) {
	// nilIfEmpty trims s and maps a blank result to nil so optional columns
	// are stored as NULL rather than "".
	nilIfEmpty := func(s string) *string {
		if trimmed := strings.TrimSpace(s); trimmed != "" {
			return &trimmed
		}
		return nil
	}
	event := model.AgentTimelineEvent{
		UserID:         payload.UserID,
		ChatID:         strings.TrimSpace(payload.ConversationID),
		Seq:            payload.Seq,
		Kind:           strings.TrimSpace(payload.Kind),
		Role:           nilIfEmpty(payload.Role),
		Content:        nilIfEmpty(payload.Content),
		Payload:        nilIfEmpty(payload.PayloadJSON),
		TokensConsumed: payload.TokensConsumed,
	}
	if err := a.db.WithContext(ctx).Create(&event).Error; err != nil {
		return 0, nil, err
	}
	return event.ID, event.CreatedAt, nil
}
// ListConversationTimelineEvents lists a conversation's timeline events in
// ascending seq order (ties broken by ascending id to keep paging stable).
func (a *AgentDAO) ListConversationTimelineEvents(ctx context.Context, userID int, chatID string) ([]model.AgentTimelineEvent, error) {
	normalizedChatID := strings.TrimSpace(chatID)
	var events []model.AgentTimelineEvent
	err := a.db.WithContext(ctx).
		Where("user_id = ? AND chat_id = ?", userID, normalizedChatID).
		Order("seq ASC").
		Order("id ASC").
		Find(&events).Error
	if err != nil {
		return nil, err
	}
	return events, nil
}

// GetConversationTimelineMaxSeq returns the current maximum seq of a
// conversation timeline.
//
// Notes:
//  1. primarily a DB fallback for when the Redis sequence counter is unavailable;
//  2. no rows yields 0 and is not treated as an error (COALESCE);
//  3. callers must +1 themselves before writing a new event.
func (a *AgentDAO) GetConversationTimelineMaxSeq(ctx context.Context, userID int, chatID string) (int64, error) {
	normalizedChatID := strings.TrimSpace(chatID)
	var maxSeq int64
	err := a.db.WithContext(ctx).
		Model(&model.AgentTimelineEvent{}).
		Where("user_id = ? AND chat_id = ?", userID, normalizedChatID).
		Select("COALESCE(MAX(seq), 0)").
		Scan(&maxSeq).Error
	if err != nil {
		return 0, err
	}
	return maxSeq, nil
}

View File

@@ -0,0 +1,64 @@
package dao
import (
"context"
"gorm.io/gorm"
)
// RepoManager aggregates every DAO so the service layer can orchestrate
// cross-repository transactions from a single handle.
type RepoManager struct {
	db                    *gorm.DB
	Schedule              *ScheduleDAO
	Task                  *TaskDAO
	Course                *CourseDAO
	TaskClass             *TaskClassDAO
	Agent                 *AgentDAO
	ActiveSchedule        *ActiveScheduleDAO
	ActiveScheduleSession *ActiveScheduleSessionDAO
}

// NewManager wires every DAO onto the shared database handle.
func NewManager(db *gorm.DB) *RepoManager {
	return &RepoManager{
		db:                    db,
		Schedule:              NewScheduleDAO(db),
		Task:                  NewTaskDAO(db),
		Course:                NewCourseDAO(db),
		TaskClass:             NewTaskClassDAO(db),
		Agent:                 NewAgentDAO(db),
		ActiveSchedule:        NewActiveScheduleDAO(db),
		ActiveScheduleSession: NewActiveScheduleSessionDAO(db),
	}
}

// WithTx builds a "same-transaction RepoManager" around an external tx handle.
//
// Responsibility boundary:
//  1. only rebinds DAO dependencies; it never begins/commits/rolls back;
//  2. lets the service layer call multiple DAOs inside one tx;
//  3. suits scenarios like outbox consumers where the infrastructure
//     transaction and the business transaction are merged.
func (m *RepoManager) WithTx(tx *gorm.DB) *RepoManager {
	return &RepoManager{
		db:                    tx,
		Schedule:              m.Schedule.WithTx(tx),
		Task:                  m.Task.WithTx(tx),
		TaskClass:             m.TaskClass.WithTx(tx),
		Course:                m.Course.WithTx(tx),
		Agent:                 m.Agent.WithTx(tx),
		ActiveSchedule:        m.ActiveSchedule.WithTx(tx),
		ActiveScheduleSession: m.ActiveScheduleSession.WithTx(tx),
	}
}

// Transaction opens a transaction and hands the callback a
// same-transaction RepoManager.
//
// Usage constraints:
//  1. the callback should only use DAOs hanging off txM, never handles from
//     outside the transaction;
//  2. a non-nil error from the callback rolls everything back;
//  3. a nil return commits.
func (m *RepoManager) Transaction(ctx context.Context, fn func(txM *RepoManager) error) error {
	return m.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		txM := m.WithTx(tx)
		return fn(txM)
	})
}

View File

@@ -0,0 +1,823 @@
package dao
import (
"context"
"encoding/json"
"errors"
"fmt"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/go-redis/redis/v8"
)
// CacheDAO wraps the Redis client and owns every cache key layout used by
// the runtime services.
type CacheDAO struct {
	client *redis.Client
}

// NewCacheDAO builds a CacheDAO on an existing Redis client.
func NewCacheDAO(client *redis.Client) *CacheDAO {
	return &CacheDAO{client: client}
}
// schedulePreviewKey builds the per-user, per-conversation schedule-preview key.
func (d *CacheDAO) schedulePreviewKey(userID int, conversationID string) string {
	return fmt.Sprintf("smartflow:schedule_preview:u:%d:c:%s", userID, conversationID)
}

// conversationTimelineKey builds the key of the timeline event list.
func (d *CacheDAO) conversationTimelineKey(userID int, conversationID string) string {
	return fmt.Sprintf("smartflow:conversation_timeline:u:%d:c:%s", userID, conversationID)
}

// conversationTimelineSeqKey builds the key of the timeline sequence counter.
func (d *CacheDAO) conversationTimelineSeqKey(userID int, conversationID string) string {
	return fmt.Sprintf("smartflow:conversation_timeline_seq:u:%d:c:%s", userID, conversationID)
}
// AddTaskClassList caches a user's task-class list.
func (d *CacheDAO) AddTaskClassList(ctx context.Context, userID int, list *model.UserGetTaskClassesResponse) error {
	// 1. Key: namespaced by userID so users' data stays isolated.
	key := fmt.Sprintf("smartflow:task_classes:%d", userID)
	// 2. Serialize: struct -> []byte.
	data, err := json.Marshal(list)
	if err != nil {
		return err
	}
	// 3. Store with a 30-minute TTL; tune as the business requires.
	return d.client.Set(ctx, key, data, 30*time.Minute).Err()
}

// GetTaskClassList reads a user's cached task-class list.
func (d *CacheDAO) GetTaskClassList(ctx context.Context, userID int) (*model.UserGetTaskClassesResponse, error) {
	key := fmt.Sprintf("smartflow:task_classes:%d", userID)
	var resp model.UserGetTaskClassesResponse
	// 1. Fetch the raw string from Redis.
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		// Note: on redis.Nil the Service layer handles the DB fallback.
		return &resp, err
	}
	// 2. Deserialize the JSON back into the response struct.
	err = json.Unmarshal([]byte(val), &resp)
	return &resp, err
}

// DeleteTaskClassList invalidates a user's cached task-class list.
func (d *CacheDAO) DeleteTaskClassList(ctx context.Context, userID int) error {
	key := fmt.Sprintf("smartflow:task_classes:%d", userID)
	return d.client.Del(ctx, key).Err()
}
// GetRecord reads a raw string value; a cache miss (redis.Nil) is normalized
// to ("", nil) so only genuine Redis failures surface as errors.
func (d *CacheDAO) GetRecord(ctx context.Context, key string) (string, error) {
	val, err := d.client.Get(ctx, key).Result()
	if errors.Is(err, redis.Nil) {
		return "", nil // normal miss
	}
	return val, err // a real Redis error
}

// SaveRecord writes a raw string value with the given TTL.
func (d *CacheDAO) SaveRecord(ctx context.Context, key string, val string, ttl time.Duration) error {
	return d.client.Set(ctx, key, val, ttl).Err()
}

// AcquireLock tries to take a TTL-bounded lock via SET NX and reports whether
// it was acquired.
func (d *CacheDAO) AcquireLock(ctx context.Context, key string, ttl time.Duration) (bool, error) {
	return d.client.SetNX(ctx, key, "processing", ttl).Result()
}

// ReleaseLock frees a lock by deleting its key.
// NOTE(review): the delete is unconditional (no ownership token), so a caller
// that held the lock past its TTL can release a lock now owned by someone
// else — confirm callers tolerate this.
func (d *CacheDAO) ReleaseLock(ctx context.Context, key string) error {
	return d.client.Del(ctx, key).Err()
}
// GetUserTasksFromCache reads the user task cache (internal model form).
//
// Responsibility boundary:
//  1. reads `[]model.Task` from Redis so the Service layer can derive
//     priority at read time;
//  2. does NOT convert models into external DTOs (that belongs to the conv layer);
//  3. does NOT backfill or invalidate the cache (backfill is Service-driven,
//     invalidation is handled centrally by the GORM cache_deleter).
//
// Input/output semantics:
//  1. a hit returns the task model slice and a nil error;
//  2. a miss returns redis.Nil so the caller decides whether to hit the DB;
//  3. a deserialization failure returns an error so a corrupted cache entry
//     is never propagated downstream.
func (d *CacheDAO) GetUserTasksFromCache(ctx context.Context, userID int) ([]model.Task, error) {
	key := fmt.Sprintf("smartflow:tasks:%d", userID)
	var tasks []model.Task
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		return nil, err // note: on redis.Nil the Service layer handles the DB fallback
	}
	err = json.Unmarshal([]byte(val), &tasks)
	return tasks, err
}

// SetUserTasksToCache writes the user task cache (internal model form).
//
// Responsibility boundary:
//  1. writes the raw `[]model.Task` as read from the DB;
//  2. does NOT apply the "urgency shift" derivation — caching derived results
//     would stop the async shift from triggering later;
//  3. does NOT delete cache entries; deletion is triggered by cache_deleter
//     after DB writes.
//
// Steps:
//  1. serialize the models to JSON so fields like `urgency_threshold_at`
//     survive intact;
//  2. write with a fixed TTL so hits relieve DB read pressure;
//  3. a serialization failure returns immediately to avoid storing
//     half-structured garbage.
func (d *CacheDAO) SetUserTasksToCache(ctx context.Context, userID int, tasks []model.Task) error {
	key := fmt.Sprintf("smartflow:tasks:%d", userID)
	data, err := json.Marshal(tasks)
	if err != nil {
		return err
	}
	return d.client.Set(ctx, key, data, 24*time.Hour).Err()
}

// DeleteUserTasksFromCache invalidates a user's task cache entry.
func (d *CacheDAO) DeleteUserTasksFromCache(ctx context.Context, userID int) error {
	key := fmt.Sprintf("smartflow:tasks:%d", userID)
	return d.client.Del(ctx, key).Err()
}
// GetUserTodayScheduleFromCache reads the cached "today's schedule" list.
// A miss surfaces as redis.Nil so the caller decides whether to hit the DB.
func (d *CacheDAO) GetUserTodayScheduleFromCache(ctx context.Context, userID int) ([]model.UserTodaySchedule, error) {
	key := fmt.Sprintf("smartflow:today_schedule:%d", userID)
	var schedules []model.UserTodaySchedule
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		return nil, err // note: on redis.Nil the Service layer handles the DB fallback
	}
	err = json.Unmarshal([]byte(val), &schedules)
	return schedules, err
}
// SetUserTodayScheduleToCache caches today's schedule list for a user.
//
// The TTL is the time remaining until local midnight so the cache naturally
// refreshes once a day. time.Now() is captured exactly once: the original
// called it four times, so a call straddling a day boundary could mix the
// old day's Day() with the new day's Month()/Year() and compute a wrong TTL.
func (d *CacheDAO) SetUserTodayScheduleToCache(ctx context.Context, userID int, schedules []model.UserTodaySchedule) error {
	key := fmt.Sprintf("smartflow:today_schedule:%d", userID)
	data, err := json.Marshal(schedules)
	if err != nil {
		return err
	}
	// Expire at the next local midnight; time.Date normalizes Day()+1 across
	// month/year boundaries.
	now := time.Now()
	nextMidnight := time.Date(now.Year(), now.Month(), now.Day()+1, 0, 0, 0, 0, now.Location())
	return d.client.Set(ctx, key, data, time.Until(nextMidnight)).Err()
}
// DeleteUserTodayScheduleFromCache invalidates the "today's schedule" entry.
func (d *CacheDAO) DeleteUserTodayScheduleFromCache(ctx context.Context, userID int) error {
	key := fmt.Sprintf("smartflow:today_schedule:%d", userID)
	return d.client.Del(ctx, key).Err()
}

// GetUserWeeklyScheduleFromCache reads the cached weekly schedule for one week.
// A miss surfaces as redis.Nil so the caller decides whether to hit the DB.
func (d *CacheDAO) GetUserWeeklyScheduleFromCache(ctx context.Context, userID int, week int) (*model.UserWeekSchedule, error) {
	key := fmt.Sprintf("smartflow:weekly_schedule:%d:%d", userID, week)
	var schedules model.UserWeekSchedule
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		return nil, err // note: on redis.Nil the Service layer handles the DB fallback
	}
	err = json.Unmarshal([]byte(val), &schedules)
	return &schedules, err
}

// SetUserWeeklyScheduleToCache caches a weekly schedule keyed by the week
// number carried on the payload itself.
func (d *CacheDAO) SetUserWeeklyScheduleToCache(ctx context.Context, userID int, schedules *model.UserWeekSchedule) error {
	key := fmt.Sprintf("smartflow:weekly_schedule:%d:%d", userID, schedules.Week)
	data, err := json.Marshal(schedules)
	if err != nil {
		return err
	}
	// Fixed one-day TTL.
	return d.client.Set(ctx, key, data, 24*time.Hour).Err()
}

// DeleteUserWeeklyScheduleFromCache invalidates one week's cached schedule.
func (d *CacheDAO) DeleteUserWeeklyScheduleFromCache(ctx context.Context, userID int, week int) error {
	key := fmt.Sprintf("smartflow:weekly_schedule:%d:%d", userID, week)
	return d.client.Del(ctx, key).Err()
}
// GetUserRecentCompletedSchedulesFromCache reads one page of the
// recently-completed schedule cache (keyed by user, page index, and limit).
func (d *CacheDAO) GetUserRecentCompletedSchedulesFromCache(ctx context.Context, userID, index, limit int) (*model.UserRecentCompletedScheduleResponse, error) {
	key := fmt.Sprintf("smartflow:recent_completed_schedules:%d:%d:%d", userID, index, limit)
	var resp model.UserRecentCompletedScheduleResponse
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		return &resp, err // note: on redis.Nil the Service layer handles the DB fallback
	}
	err = json.Unmarshal([]byte(val), &resp)
	return &resp, err
}

// SetUserRecentCompletedSchedulesToCache caches one page of the
// recently-completed schedule response.
func (d *CacheDAO) SetUserRecentCompletedSchedulesToCache(ctx context.Context, userID, index, limit int, resp *model.UserRecentCompletedScheduleResponse) error {
	key := fmt.Sprintf("smartflow:recent_completed_schedules:%d:%d:%d", userID, index, limit)
	data, err := json.Marshal(resp)
	if err != nil {
		return err
	}
	// 30-minute TTL.
	return d.client.Set(ctx, key, data, 30*time.Minute).Err()
}

// DeleteUserRecentCompletedSchedulesFromCache invalidates every cached page
// for the user by SCANning the key pattern in batches of 500.
func (d *CacheDAO) DeleteUserRecentCompletedSchedulesFromCache(ctx context.Context, userID int) error {
	pattern := fmt.Sprintf("smartflow:recent_completed_schedules:%d:*", userID)
	var cursor uint64
	for {
		keys, next, err := d.client.Scan(ctx, cursor, pattern, 500).Result()
		if err != nil {
			return err
		}
		if len(keys) > 0 {
			// UNLINK deletes asynchronously to reduce blocking; switch to Del()
			// if strictly synchronous deletion is ever required.
			if err := d.client.Unlink(ctx, keys...).Err(); err != nil {
				return err
			}
		}
		cursor = next
		if cursor == 0 {
			break
		}
	}
	return nil
}
// GetUserOngoingScheduleFromCache reads the cached "currently ongoing"
// schedule. The literal string "null" is a cached negative result meaning
// "nothing ongoing" and is returned as (nil, nil).
func (d *CacheDAO) GetUserOngoingScheduleFromCache(ctx context.Context, userID int) (*model.OngoingSchedule, error) {
	key := fmt.Sprintf("smartflow:ongoing_schedule:%d", userID)
	var schedule model.OngoingSchedule
	val, err := d.client.Get(ctx, key).Result()
	if err != nil {
		return &schedule, err // note: on redis.Nil the Service layer handles the DB fallback
	}
	if val == "null" {
		return nil, nil // previously cached "nothing ongoing"; return nil directly
	}
	err = json.Unmarshal([]byte(val), &schedule)
	return &schedule, err
}

// SetUserOngoingScheduleToCache caches the currently ongoing schedule, or a
// short-lived negative marker when there is none.
func (d *CacheDAO) SetUserOngoingScheduleToCache(ctx context.Context, userID int, schedule *model.OngoingSchedule) error {
	if schedule == nil {
		// No ongoing schedule: cache "null" with a short TTL to avoid
		// hammering the DB with repeat lookups.
		key := fmt.Sprintf("smartflow:ongoing_schedule:%d", userID)
		return d.client.Set(ctx, key, "null", 5*time.Minute).Err()
	}
	key := fmt.Sprintf("smartflow:ongoing_schedule:%d", userID)
	data, err := json.Marshal(schedule)
	if err != nil {
		return err
	}
	// TTL equals the time remaining until the schedule's end; already-ended
	// schedules are not cached at all.
	// NOTE(review): when ttl <= 0 any previously cached entry is left in
	// place untouched — confirm a stale entry cannot linger here.
	ttl := time.Until(schedule.EndTime)
	if ttl <= 0 {
		return nil
	}
	return d.client.Set(ctx, key, data, ttl).Err()
}

// DeleteUserOngoingScheduleFromCache invalidates the ongoing-schedule entry.
func (d *CacheDAO) DeleteUserOngoingScheduleFromCache(ctx context.Context, userID int) error {
	key := fmt.Sprintf("smartflow:ongoing_schedule:%d", userID)
	return d.client.Del(ctx, key).Err()
}
// SetSchedulePlanPreviewToCache writes the "schedule preview" cache entry.
//
// Responsibility boundary:
//  1. writes the structured preview snapshot keyed by user_id + conversation_id;
//  2. validates basic parameters before writing to avoid junk keys;
//  3. does NOT fall back to the DB and does NOT implement retry policy.
//
// Steps:
//  1. validate user_id / conversation_id / preview to prevent dirty writes;
//  2. serialize the preview to JSON to keep the cached structure stable;
//  3. write with a fixed 1-hour TTL after which the entry expires on its own.
func (d *CacheDAO) SetSchedulePlanPreviewToCache(ctx context.Context, userID int, conversationID string, preview *model.SchedulePlanPreviewCache) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	if preview == nil {
		return errors.New("schedule preview is nil")
	}
	data, err := json.Marshal(preview)
	if err != nil {
		return fmt.Errorf("marshal schedule preview failed: %w", err)
	}
	return d.client.Set(ctx, d.schedulePreviewKey(userID, normalizedConversationID), data, 1*time.Hour).Err()
}
// GetSchedulePlanPreviewFromCache reads the "schedule preview" cache entry.
//
// Return semantics:
//  1. hit: (*SchedulePlanPreviewCache, nil);
//  2. miss: (nil, nil);
//  3. Redis failure or JSON decode failure: (nil, error).
func (d *CacheDAO) GetSchedulePlanPreviewFromCache(ctx context.Context, userID int, conversationID string) (*model.SchedulePlanPreviewCache, error) {
	if d == nil || d.client == nil {
		return nil, errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return nil, errors.New("conversation_id is empty")
	}
	raw, err := d.client.Get(ctx, d.schedulePreviewKey(userID, normalizedConversationID)).Result()
	// errors.Is (not ==) also matches a wrapped redis.Nil, consistent with
	// how GetRecord detects a miss.
	if errors.Is(err, redis.Nil) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var preview model.SchedulePlanPreviewCache
	if err = json.Unmarshal([]byte(raw), &preview); err != nil {
		return nil, fmt.Errorf("unmarshal schedule preview failed: %w", err)
	}
	return &preview, nil
}
// DeleteSchedulePlanPreviewFromCache removes the "schedule preview" cache entry.
//
// Notes:
//  1. deletion is idempotent — a missing key still counts as success;
//  2. used to clear a stale preview before a fresh scheduling run, or to
//     invalidate after the state snapshot is updated.
func (d *CacheDAO) DeleteSchedulePlanPreviewFromCache(ctx context.Context, userID int, conversationID string) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	return d.client.Del(ctx, d.schedulePreviewKey(userID, normalizedConversationID)).Err()
}
// IncrConversationTimelineSeq atomically increments and returns the
// conversation timeline seq.
//
// Notes:
//  1. seq only increments within one user_id + conversation_id scope;
//  2. Redis INCR guarantees no duplicate sequence numbers under concurrency;
//  3. the key also carries a TTL so long-tail conversations do not occupy
//     the cache forever (the pipelined EXPIRE refreshes it on every call).
func (d *CacheDAO) IncrConversationTimelineSeq(ctx context.Context, userID int, conversationID string) (int64, error) {
	if d == nil || d.client == nil {
		return 0, errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return 0, fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return 0, errors.New("conversation_id is empty")
	}
	key := d.conversationTimelineSeqKey(userID, normalizedConversationID)
	pipe := d.client.Pipeline()
	incrCmd := pipe.Incr(ctx, key)
	pipe.Expire(ctx, key, 24*time.Hour)
	if _, err := pipe.Exec(ctx); err != nil {
		return 0, err
	}
	return incrCmd.Val(), nil
}

// SetConversationTimelineSeq force-sets the current timeline seq (used when
// backfilling Redis from the DB fallback). Negative values are clamped to 0.
func (d *CacheDAO) SetConversationTimelineSeq(ctx context.Context, userID int, conversationID string, seq int64) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	if seq < 0 {
		seq = 0
	}
	return d.client.Set(ctx, d.conversationTimelineSeqKey(userID, normalizedConversationID), seq, 24*time.Hour).Err()
}
// AppendConversationTimelineEventToCache appends a single timeline event to
// the cached list, refreshing the list TTL in the same pipeline.
func (d *CacheDAO) AppendConversationTimelineEventToCache(
	ctx context.Context,
	userID int,
	conversationID string,
	item model.GetConversationTimelineItem,
) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	data, err := json.Marshal(item)
	if err != nil {
		return fmt.Errorf("marshal conversation timeline item failed: %w", err)
	}
	key := d.conversationTimelineKey(userID, normalizedConversationID)
	pipe := d.client.Pipeline()
	pipe.RPush(ctx, key, data)
	pipe.Expire(ctx, key, 24*time.Hour)
	_, err = pipe.Exec(ctx)
	return err
}

// SetConversationTimelineToCache replaces the cached timeline wholesale:
// the old list is deleted and the given items are pushed in order, all in
// one pipeline so readers never see a partially-built list from this writer.
func (d *CacheDAO) SetConversationTimelineToCache(ctx context.Context, userID int, conversationID string, items []model.GetConversationTimelineItem) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return errors.New("conversation_id is empty")
	}
	key := d.conversationTimelineKey(userID, normalizedConversationID)
	pipe := d.client.Pipeline()
	pipe.Del(ctx, key)
	if len(items) > 0 {
		values := make([]interface{}, 0, len(items))
		for _, item := range items {
			data, err := json.Marshal(item)
			if err != nil {
				return fmt.Errorf("marshal conversation timeline item failed: %w", err)
			}
			values = append(values, data)
		}
		pipe.RPush(ctx, key, values...)
	}
	pipe.Expire(ctx, key, 24*time.Hour)
	_, err := pipe.Exec(ctx)
	return err
}
// GetConversationTimelineFromCache reads the cached timeline (already in seq
// order because events are RPUSHed in order).
//
// Return semantics: a missing or empty list yields (nil, nil); Redis or
// decode failures yield an error.
func (d *CacheDAO) GetConversationTimelineFromCache(ctx context.Context, userID int, conversationID string) ([]model.GetConversationTimelineItem, error) {
	if d == nil || d.client == nil {
		return nil, errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", userID)
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if normalizedConversationID == "" {
		return nil, errors.New("conversation_id is empty")
	}
	rawItems, err := d.client.LRange(ctx, d.conversationTimelineKey(userID, normalizedConversationID), 0, -1).Result()
	// errors.Is (not ==) also matches a wrapped redis.Nil, consistent with
	// how GetRecord detects a miss.
	if errors.Is(err, redis.Nil) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if len(rawItems) == 0 {
		return nil, nil
	}
	items := make([]model.GetConversationTimelineItem, 0, len(rawItems))
	for _, raw := range rawItems {
		var item model.GetConversationTimelineItem
		if err := json.Unmarshal([]byte(raw), &item); err != nil {
			return nil, fmt.Errorf("unmarshal conversation timeline item failed: %w", err)
		}
		items = append(items, item)
	}
	return items, nil
}
// DeleteConversationTimelineFromCache drops both the timeline list and its
// seq counter key for the given user/conversation pair.
func (d *CacheDAO) DeleteConversationTimelineFromCache(ctx context.Context, userID int, conversationID string) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return fmt.Errorf("invalid user_id: %d", userID)
	}
	convID := strings.TrimSpace(conversationID)
	if convID == "" {
		return errors.New("conversation_id is empty")
	}
	keys := []string{
		d.conversationTimelineKey(userID, convID),
		d.conversationTimelineSeqKey(userID, convID),
	}
	return d.client.Del(ctx, keys...).Err()
}
// activeScheduleSessionCacheTTL is the Redis TTL applied by
// SetActiveScheduleSessionToCache to both dimensions of the active schedule
// session cache. (The comment previously attached here described
// agentStateKey, which is defined further down in this file.)
const activeScheduleSessionCacheTTL = 2 * time.Hour
// activeScheduleSessionKey builds the session_id-scoped cache key for an
// active schedule session.
func (d *CacheDAO) activeScheduleSessionKey(sessionID string) string {
	trimmed := strings.TrimSpace(sessionID)
	return "smartflow:active_schedule_session:s:" + trimmed
}
// activeScheduleSessionConversationKey builds the user_id + conversation_id
// scoped cache key for an active schedule session.
func (d *CacheDAO) activeScheduleSessionConversationKey(userID int, conversationID string) string {
	convID := strings.TrimSpace(conversationID)
	return fmt.Sprintf("smartflow:active_schedule_session:u:%d:c:%s", userID, convID)
}
// SetActiveScheduleSessionToCache synchronously writes the active schedule
// session cache.
//
// Steps:
//  1. validate the snapshot and its primary key first, so an invalid session
//     never reaches Redis;
//  2. write the same snapshot under both the session_id key and the
//     user_id + conversation_id key;
//  3. if conversation_id is not bound yet, only the session_id key is
//     written, avoiding an empty routing key.
func (d *CacheDAO) SetActiveScheduleSessionToCache(ctx context.Context, snapshot *model.ActiveScheduleSessionSnapshot) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if snapshot == nil {
		return errors.New("active schedule session snapshot is nil")
	}
	sessionID := strings.TrimSpace(snapshot.SessionID)
	if sessionID == "" {
		return errors.New("session_id is empty")
	}
	data, err := json.Marshal(snapshot)
	if err != nil {
		return fmt.Errorf("marshal active schedule session cache failed: %w", err)
	}
	pipe := d.client.Pipeline()
	pipe.Set(ctx, d.activeScheduleSessionKey(sessionID), data, activeScheduleSessionCacheTTL)
	// Only write the conversation-dimension key when the routing fields exist.
	if conversationID := strings.TrimSpace(snapshot.ConversationID); conversationID != "" && snapshot.UserID > 0 {
		pipe.Set(ctx, d.activeScheduleSessionConversationKey(snapshot.UserID, conversationID), data, activeScheduleSessionCacheTTL)
	}
	_, err = pipe.Exec(ctx)
	return err
}
// GetActiveScheduleSessionFromCache loads an active schedule session snapshot
// by session_id. A cache miss returns (nil, nil).
func (d *CacheDAO) GetActiveScheduleSessionFromCache(ctx context.Context, sessionID string) (*model.ActiveScheduleSessionSnapshot, error) {
	if d == nil || d.client == nil {
		return nil, errors.New("cache dao is not initialized")
	}
	id := strings.TrimSpace(sessionID)
	if id == "" {
		return nil, errors.New("session_id is empty")
	}
	raw, err := d.client.Get(ctx, d.activeScheduleSessionKey(id)).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return nil, nil
	case err != nil:
		return nil, err
	}
	snapshot := new(model.ActiveScheduleSessionSnapshot)
	if decodeErr := json.Unmarshal([]byte(raw), snapshot); decodeErr != nil {
		return nil, fmt.Errorf("unmarshal active schedule session cache failed: %w", decodeErr)
	}
	return snapshot, nil
}
// GetActiveScheduleSessionFromConversationCache loads an active schedule
// session snapshot by user_id + conversation_id. A cache miss returns
// (nil, nil).
func (d *CacheDAO) GetActiveScheduleSessionFromConversationCache(ctx context.Context, userID int, conversationID string) (*model.ActiveScheduleSessionSnapshot, error) {
	if d == nil || d.client == nil {
		return nil, errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return nil, fmt.Errorf("invalid user_id: %d", userID)
	}
	convID := strings.TrimSpace(conversationID)
	if convID == "" {
		return nil, errors.New("conversation_id is empty")
	}
	raw, err := d.client.Get(ctx, d.activeScheduleSessionConversationKey(userID, convID)).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return nil, nil
	case err != nil:
		return nil, err
	}
	snapshot := new(model.ActiveScheduleSessionSnapshot)
	if decodeErr := json.Unmarshal([]byte(raw), snapshot); decodeErr != nil {
		return nil, fmt.Errorf("unmarshal active schedule session cache failed: %w", decodeErr)
	}
	return snapshot, nil
}
// DeleteActiveScheduleSessionFromCache clears the active schedule session
// cache.
//
// Notes:
//  1. both the session_id and conversation_id dimensions are cleaned, so no
//     stale routing key survives;
//  2. with an empty conversation_id only the session_id key is removed;
//  3. the delete itself is idempotent — a missing key still succeeds.
func (d *CacheDAO) DeleteActiveScheduleSessionFromCache(ctx context.Context, sessionID string, userID int, conversationID string) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	sid := strings.TrimSpace(sessionID)
	if sid == "" {
		return errors.New("session_id is empty")
	}
	keys := []string{d.activeScheduleSessionKey(sid)}
	cid := strings.TrimSpace(conversationID)
	if userID > 0 && cid != "" {
		keys = append(keys, d.activeScheduleSessionConversationKey(userID, cid))
	}
	return d.client.Del(ctx, keys...).Err()
}
// agentStateKey returns the Redis key for an agent runtime-state snapshot.
//
// Key design:
//  1. the smartflow:agent_state prefix isolates it from other key namespaces;
//  2. conversationID is the unique identifier because agent state is
//     persisted per conversation.
func (d *CacheDAO) agentStateKey(conversationID string) string {
	return fmt.Sprintf("smartflow:agent_state:%s", conversationID)
}
// SaveAgentState serializes the agent runtime snapshot and stores it in
// Redis.
//
// Scope:
//  1. JSON marshal + Redis SET only — no business validation here;
//  2. a 2h TTL lets expired state clean itself up, with the MySQL outbox
//     handling async persistence;
//  3. a nil snapshot returns immediately so no junk is written.
func (d *CacheDAO) SaveAgentState(ctx context.Context, conversationID string, snapshot any) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	convID := strings.TrimSpace(conversationID)
	if convID == "" {
		return errors.New("conversation_id is empty")
	}
	if snapshot == nil {
		return nil
	}
	payload, err := json.Marshal(snapshot)
	if err != nil {
		return fmt.Errorf("marshal agent state failed: %w", err)
	}
	return d.client.Set(ctx, d.agentStateKey(convID), payload, 2*time.Hour).Err()
}
// LoadAgentState reads and decodes the agent runtime snapshot into result.
//
// Return semantics:
//  1. (true, nil)  — snapshot found and decoded;
//  2. (false, nil) — cache miss, not an error; caller should start a fresh
//     conversation;
//  3. (false, err) — Redis or decode failure.
func (d *CacheDAO) LoadAgentState(ctx context.Context, conversationID string, result any) (bool, error) {
	if d == nil || d.client == nil {
		return false, errors.New("cache dao is not initialized")
	}
	convID := strings.TrimSpace(conversationID)
	if convID == "" {
		return false, errors.New("conversation_id is empty")
	}
	raw, err := d.client.Get(ctx, d.agentStateKey(convID)).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return false, nil
	case err != nil:
		return false, err
	}
	if decodeErr := json.Unmarshal([]byte(raw), result); decodeErr != nil {
		return false, fmt.Errorf("unmarshal agent state failed: %w", decodeErr)
	}
	return true, nil
}
// DeleteAgentState removes the agent runtime snapshot for a conversation.
//
// Semantics:
//  1. idempotent — a missing key still counts as success;
//  2. typically invoked once the Deliver node finishes its task.
func (d *CacheDAO) DeleteAgentState(ctx context.Context, conversationID string) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	convID := strings.TrimSpace(conversationID)
	if convID == "" {
		return errors.New("conversation_id is empty")
	}
	return d.client.Del(ctx, d.agentStateKey(convID)).Err()
}
// --- memory prefetch cache ---
const (
	// memoryPrefetchTTL expires prefetched memory items after 30 minutes;
	// staleness is handled by natural expiry, not explicit invalidation.
	memoryPrefetchTTL = 30 * time.Minute
)
// memoryPrefetchKey builds the per-user, per-conversation memory prefetch
// cache key.
//
//  1. Format: smartflow:memory_prefetch:u:{userID}:c:{chatID}, matching the
//     conversationTimelineKey / schedulePreviewKey naming style;
//  2. an empty chatID yields e.g. smartflow:memory_prefetch:u:5:c:, which is
//     still valid and unique and cannot collide with another conversation;
//  3. scoping by chatID keeps each conversation's prefetch cache independent
//     so memory context never leaks between conversations.
//
// NOTE(review): unlike the session key builders, chatID is NOT TrimSpace'd
// here — callers must pass it normalized; confirm before changing, since
// trimming would orphan existing keys.
func (d *CacheDAO) memoryPrefetchKey(userID int, chatID string) string {
	return fmt.Sprintf("smartflow:memory_prefetch:u:%d:c:%s", userID, chatID)
}
// GetMemoryPrefetchCache reads the prefetched memory items for a user within
// a chat.
//
// Semantics:
//  1. hit  — returns the ItemDTO slice and nil error;
//  2. miss — returns nil, nil;
//  3. Redis or decode failure — returns an error.
func (d *CacheDAO) GetMemoryPrefetchCache(ctx context.Context, userID int, chatID string) ([]memorymodel.ItemDTO, error) {
	if d == nil || d.client == nil {
		return nil, errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return nil, nil
	}
	raw, err := d.client.Get(ctx, d.memoryPrefetchKey(userID, chatID)).Result()
	switch {
	case errors.Is(err, redis.Nil):
		return nil, nil
	case err != nil:
		return nil, err
	}
	var cached []memorymodel.ItemDTO
	if decodeErr := json.Unmarshal([]byte(raw), &cached); decodeErr != nil {
		return nil, fmt.Errorf("unmarshal memory prefetch cache failed: %w", decodeErr)
	}
	return cached, nil
}
// SetMemoryPrefetchCache stores retrieved memory DTOs for the next Chat-node
// turn to consume.
//
// Scope:
//  1. writes the DTO slice to Redis under the per-user/per-chat key;
//  2. entries lapse after memoryPrefetchTTL — no explicit invalidation;
//  3. empty or nil items are a no-op, so no junk is written.
func (d *CacheDAO) SetMemoryPrefetchCache(ctx context.Context, userID int, chatID string, items []memorymodel.ItemDTO) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 || len(items) == 0 {
		return nil
	}
	payload, err := json.Marshal(items)
	if err != nil {
		return fmt.Errorf("marshal memory prefetch cache failed: %w", err)
	}
	return d.client.Set(ctx, d.memoryPrefetchKey(userID, chatID), payload, memoryPrefetchTTL).Err()
}
// DeleteMemoryPrefetchCacheByUser removes the memory prefetch cache for every
// conversation of a user.
//
// Steps:
//  1. SCAN iterates all keys matching smartflow:memory_prefetch:u:{userID}:c:*;
//  2. UNLINK deletes them asynchronously so the Redis main thread is not
//     blocked;
//  3. reuses the SCAN+UNLINK pattern of
//     DeleteUserRecentCompletedSchedulesFromCache;
//  4. both the GORM cache deleter and the empty-retrieval cleanup path call
//     this, keeping the cache consistent.
func (d *CacheDAO) DeleteMemoryPrefetchCacheByUser(ctx context.Context, userID int) error {
	if d == nil || d.client == nil {
		return errors.New("cache dao is not initialized")
	}
	if userID <= 0 {
		return nil
	}
	pattern := fmt.Sprintf("smartflow:memory_prefetch:u:%d:c:*", userID)
	var cursor uint64
	for {
		keys, next, err := d.client.Scan(ctx, cursor, pattern, 500).Result()
		if err != nil {
			return err
		}
		if len(keys) > 0 {
			// 1. UNLINK is the async variant of DEL and does not block the
			//    Redis main thread;
			// 2. missing keys are not an error, so this stays idempotent.
			if err := d.client.Unlink(ctx, keys...).Err(); err != nil {
				return err
			}
		}
		cursor = next
		if cursor == 0 {
			break
		}
	}
	return nil
}

View File

@@ -0,0 +1,50 @@
package dao
import (
"context"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"gorm.io/gorm"
)
// CourseDAO wraps a *gorm.DB handle for course-related schedule writes.
type CourseDAO struct {
	db *gorm.DB
}
// NewCourseDAO creates a CourseDAO bound to the given database handle.
// (The previous comment said "ScheduleDAO" by mistake.)
func NewCourseDAO(db *gorm.DB) *CourseDAO {
	return &CourseDAO{
		db: db,
	}
}
// WithTx returns a CourseDAO that runs on the given transaction handle.
func (r *CourseDAO) WithTx(tx *gorm.DB) *CourseDAO {
	return &CourseDAO{db: tx}
}
// AddUserCoursesIntoSchedule bulk-inserts course slot rows into schedules.
//
// Fix: an empty slice is now a no-op — gorm's Create returns an "empty slice
// found" error otherwise — matching the guard in InsertScheduleEvents.
func (r *CourseDAO) AddUserCoursesIntoSchedule(ctx context.Context, courses []model.Schedule) error {
	if len(courses) == 0 {
		return nil
	}
	if err := r.db.WithContext(ctx).Create(&courses).Error; err != nil {
		return err
	}
	return nil
}
// AddUserCoursesIntoScheduleEvents bulk-inserts schedule events and returns
// the new IDs in input order.
//
// Fix: an empty slice is now a no-op returning (nil, nil) — gorm's Create
// errors on empty slices — matching the guard in InsertScheduleEvents.
func (r *CourseDAO) AddUserCoursesIntoScheduleEvents(ctx context.Context, events []model.ScheduleEvent) ([]int, error) {
	if len(events) == 0 {
		return nil, nil
	}
	if err := r.db.WithContext(ctx).Create(&events).Error; err != nil {
		return nil, err
	}
	ids := make([]int, 0, len(events))
	for i := range events {
		ids = append(ids, events[i].ID)
	}
	return ids, nil
}
// Transaction runs fn inside a single database transaction so the service
// layer can reuse it (auto commit/rollback).
//
// Rules: fn returns nil -> commit; fn returns an error or panics -> rollback.
// Note: gorm.(*DB).Transaction rolls back when fn returns an error, and on a
// panic it rolls back and then re-panics upward.
func (r *CourseDAO) Transaction(fn func(txDAO *CourseDAO) error) error {
	return r.db.Transaction(func(tx *gorm.DB) error {
		return fn(NewCourseDAO(tx))
	})
}

View File

@@ -0,0 +1,671 @@
package dao
import (
"context"
"errors"
"fmt"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/LoveLosita/smartflow/backend/shared/respond"
"gorm.io/gorm"
)
// ScheduleDAO provides persistence for the schedules / schedule_events tables.
type ScheduleDAO struct {
	db *gorm.DB
}
// NewScheduleDAO creates a ScheduleDAO bound to the given database handle.
// (The previous comment said "TaskClassDAO" by mistake.)
func NewScheduleDAO(db *gorm.DB) *ScheduleDAO {
	return &ScheduleDAO{
		db: db,
	}
}
// WithTx returns a ScheduleDAO that runs on the given transaction handle.
func (d *ScheduleDAO) WithTx(tx *gorm.DB) *ScheduleDAO {
	return &ScheduleDAO{db: tx}
}
// AddSchedules bulk-inserts schedule slot rows and returns their new IDs.
//
// Fix: an empty slice is now a no-op returning (nil, nil) — gorm's Create
// errors on empty slices — matching the guard in InsertScheduleEvents.
func (d *ScheduleDAO) AddSchedules(schedules []model.Schedule) ([]int, error) {
	if len(schedules) == 0 {
		return nil, nil
	}
	if err := d.db.Create(&schedules).Error; err != nil {
		return nil, err
	}
	ids := make([]int, len(schedules))
	for i, s := range schedules {
		ids[i] = s.ID
	}
	return ids, nil
}
// EmbedTaskIntoSchedule stamps taskID into embedded_task_id for the user's
// slots in the given week/weekday/section range.
func (d *ScheduleDAO) EmbedTaskIntoSchedule(startSection, endSection, dayOfWeek, week, userID, taskID int) error {
	// Only the rows of the specified user/week/weekday/section range are
	// updated, so the write is precise by construction.
	res := d.db.
		Table("schedules").
		Where("user_id = ? AND week = ? AND day_of_week = ? AND section BETWEEN ? AND ?", userID, week, dayOfWeek, startSection, endSection).
		Update("embedded_task_id", taskID)
	return res.Error
}
// GetCourseUserIDByID returns the owner user_id of a schedule event.
// A missing row or a NULL user_id both map to respond.WrongCourseID.
func (d *ScheduleDAO) GetCourseUserIDByID(ctx context.Context, courseScheduleEventID int) (int, error) {
	// A pointer field distinguishes a NULL column from a real zero value.
	type row struct {
		UserID *int `gorm:"column:user_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("user_id").
		Where("id = ?", courseScheduleEventID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, respond.WrongCourseID
		}
		return 0, err
	}
	if r.UserID == nil {
		return 0, respond.WrongCourseID
	}
	return *r.UserID, nil
}
// IsCourseEmbeddedByOtherTaskBlock reports whether the course already has an
// embedded task block within the given section range (a business-rule limit).
func (d *ScheduleDAO) IsCourseEmbeddedByOtherTaskBlock(ctx context.Context, courseID, startSection, endSection int) (bool, error) {
	// An invalid range is treated as "no conflict".
	if startSection <= 0 || endSection <= 0 || startSection > endSection {
		return false, nil
	}
	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("id = ?", courseID).
		Where("section BETWEEN ? AND ?", startSection, endSection).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Count(&cnt).Error
	if err != nil {
		return false, err
	}
	return cnt > 0, nil
}
// HasUserScheduleConflict reports whether the user already has any slot in
// the given week/day whose section intersects the sections list.
func (d *ScheduleDAO) HasUserScheduleConflict(ctx context.Context, userID, week, dayOfWeek int, sections []int) (bool, error) {
	// No sections means no conflict by definition.
	if len(sections) == 0 {
		return false, nil
	}
	// Count rows of the same user/week/day whose section is in the set.
	// Expected table columns: user_id, week, day_of_week, section.
	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("user_id = ? AND week = ? AND day_of_week = ?", userID, week, dayOfWeek).
		Where("section IN ?", sections).
		Count(&cnt).Error
	if err != nil {
		return false, err
	}
	return cnt > 0, nil
}
// IsCourseTimeMatch reports whether the course event fully covers the section
// range on the given week/weekday: every section in [startSection, endSection]
// must have its own schedules row linked to the event.
func (d *ScheduleDAO) IsCourseTimeMatch(ctx context.Context, courseScheduleEventID, week, dayOfWeek, startSection, endSection int) (bool, error) {
	// An invalid range can never match.
	if startSection <= 0 || endSection <= 0 || startSection > endSection {
		return false, nil
	}
	// Per the current table layout (schedule_events stores the event,
	// schedules stores per-section rows), schedules links to
	// schedule_events.id via its event_id column.
	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("event_id = ?", courseScheduleEventID).
		Where("week = ? AND day_of_week = ?", week, dayOfWeek).
		Where("section BETWEEN ? AND ?", startSection, endSection).
		Count(&cnt).Error
	if err != nil {
		return false, err
	}
	// Every section in the range must exist for the match to hold.
	return cnt == int64(endSection-startSection+1), nil
}
// AddScheduleEvent inserts a single schedule event and returns its new ID.
//
// Fix: the original passed &scheduleEvent — a **model.ScheduleEvent — to
// Create; gorm tolerates the double pointer, but passing the pointer itself
// is the documented form. A nil event is also rejected explicitly instead of
// surfacing as a gorm reflection error.
func (d *ScheduleDAO) AddScheduleEvent(scheduleEvent *model.ScheduleEvent) (int, error) {
	if scheduleEvent == nil {
		return 0, errors.New("schedule event is nil")
	}
	if err := d.db.Create(scheduleEvent).Error; err != nil {
		return 0, err
	}
	return scheduleEvent.ID, nil
}
// CheckScheduleConflict reports whether any of the given slots collides with
// an existing COURSE slot (same user, same week, same day, intersecting
// sections). Other event types are ignored here.
func (d *ScheduleDAO) CheckScheduleConflict(ctx context.Context, schedules []model.Schedule) (bool, error) {
	if len(schedules) == 0 {
		return false, nil
	}
	// Aggregate: dedupe sections per (user, week, day) so each group needs
	// only one query.
	type key struct {
		UserID    int
		Week      int
		DayOfWeek int
	}
	groups := make(map[key]map[int]struct{})
	for _, s := range schedules {
		// Rows with invalid base fields are skipped (treated as no conflict).
		if s.UserID <= 0 || s.Week <= 0 || s.DayOfWeek <= 0 || s.Section <= 0 {
			continue
		}
		k := key{UserID: s.UserID, Week: s.Week, DayOfWeek: s.DayOfWeek}
		if _, ok := groups[k]; !ok {
			groups[k] = make(map[int]struct{})
		}
		groups[k][s.Section] = struct{}{}
	}
	for k, set := range groups {
		if len(set) == 0 {
			continue
		}
		sections := make([]int, 0, len(set))
		for sec := range set {
			sections = append(sections, sec)
		}
		// Only "course" events count as conflicts:
		// schedules.event_id -> schedule_events.id, filtered on
		// schedule_events.type.
		var cnt int64
		err := d.db.WithContext(ctx).
			Table("schedules s").
			Joins("JOIN schedule_events e ON e.id = s.event_id").
			Where("s.user_id = ? AND s.week = ? AND s.day_of_week = ?", k.UserID, k.Week, k.DayOfWeek).
			Where("s.section IN ?", sections).
			Where("e.type = ?", "course").
			Count(&cnt).Error
		if err != nil {
			return false, err
		}
		if cnt > 0 {
			return true, nil
		}
	}
	return false, nil
}
// GetNonCourseScheduleConflicts returns every slot of any NON-course event
// that collides with one of the new slots, with the Event association loaded
// so callers can display names.
//
// NOTE(review): the user id is taken from newSchedules[0] — all rows are
// assumed to belong to one user; confirm callers guarantee that.
func (d *ScheduleDAO) GetNonCourseScheduleConflicts(ctx context.Context, newSchedules []model.Schedule) ([]model.Schedule, error) {
	if len(newSchedules) == 0 {
		return nil, nil
	}
	// 1. Build a fingerprint map of the new coordinates for fast comparison.
	userID := newSchedules[0].UserID
	weeksMap := make(map[int]bool)
	newSlotsFingerprints := make(map[string]bool)
	for _, s := range newSchedules {
		weeksMap[s.Week] = true
		key := fmt.Sprintf("%d-%d-%d", s.Week, s.DayOfWeek, s.Section)
		newSlotsFingerprints[key] = true
	}
	weeks := make([]int, 0, len(weeksMap))
	for w := range weeksMap {
		weeks = append(weeks, w)
	}
	// 2. First pass: a slim struct fetches only coordinates and EventID.
	type simpleSlot struct {
		EventID   int
		Week      int
		DayOfWeek int
		Section   int
	}
	var candidates []simpleSlot
	// Only index-covered columns are selected, keeping this query cheap.
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("schedules.event_id, schedules.week, schedules.day_of_week, schedules.section").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		Where("schedules.user_id = ? AND schedules.week IN ? AND schedule_events.type != ?", userID, weeks, "course").
		Scan(&candidates).Error
	if err != nil {
		return nil, err
	}
	// 3. Keep only the EventIDs whose coordinates actually collide.
	eventIDMap := make(map[int]bool)
	for _, s := range candidates {
		key := fmt.Sprintf("%d-%d-%d", s.Week, s.DayOfWeek, s.Section)
		if newSlotsFingerprints[key] {
			eventIDMap[s.EventID] = true
		}
	}
	if len(eventIDMap) == 0 {
		return nil, nil
	}
	// 4. Second pass: fetch ALL atomic slots of the colliding events, not just
	//    the colliding slots themselves.
	var ids []int
	for id := range eventIDMap {
		ids = append(ids, id)
	}
	var fullConflicts []model.Schedule
	// Preload("Event") is required so the DTO layer can show event names.
	err = d.db.WithContext(ctx).
		Preload("Event").
		Where("event_id IN ?", ids).
		Find(&fullConflicts).Error
	return fullConflicts, err
}
// GetUserTodaySchedule returns a user's slots for one day of one week,
// ordered by section. Event carries the base course/task info; EmbeddedTask
// carries the details of a task embedded into a filler course.
func (d *ScheduleDAO) GetUserTodaySchedule(ctx context.Context, userID, week, dayOfWeek int) ([]model.Schedule, error) {
	var result []model.Schedule
	query := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Where("user_id = ? AND week = ? AND day_of_week = ?", userID, week, dayOfWeek).
		Order("section ASC")
	if err := query.Find(&result).Error; err != nil {
		return nil, err
	}
	return result, nil
}
// GetUserWeeklySchedule returns all of a user's slots for one week, ordered
// by weekday then section, with Event and EmbeddedTask preloaded.
func (d *ScheduleDAO) GetUserWeeklySchedule(ctx context.Context, userID, week int) ([]model.Schedule, error) {
	var result []model.Schedule
	query := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Where("user_id = ? AND week = ?", userID, week).
		Order("day_of_week ASC, section ASC")
	if err := query.Find(&result).Error; err != nil {
		return nil, err
	}
	return result, nil
}
// DeleteScheduleEventAndSchedule deletes an event and all of its slot rows in
// one transaction, returning respond.WrongScheduleEventID when the event does
// not exist or belongs to another user.
func (d *ScheduleDAO) DeleteScheduleEventAndSchedule(ctx context.Context, eventID int, userID int) error {
	return d.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		// Load the schedules first so the Delete carries full model values
		// (GORM hooks/plugins read UserID/Week from them).
		var schedules []model.Schedule
		if err := tx.
			Where("event_id = ? AND user_id = ?", eventID, userID).
			Find(&schedules).Error; err != nil {
			return err
		}
		// Explicitly delete the child schedules rows so their GORM delete
		// callbacks/plugins fire.
		if len(schedules) > 0 {
			if err := tx.Delete(&schedules).Error; err != nil {
				return err
			}
		}
		// Then delete the parent schedule_events row (callbacks fire too).
		res := tx.Where("id = ? AND user_id = ?", eventID, userID).
			Delete(&model.ScheduleEvent{})
		if res.Error != nil {
			return res.Error
		}
		if res.RowsAffected == 0 {
			return respond.WrongScheduleEventID
		}
		return nil
	})
}
// GetScheduleTypeByEventID returns the type of a schedule event owned by the
// user. A missing/foreign row or a NULL type maps to
// respond.WrongScheduleEventID.
func (d *ScheduleDAO) GetScheduleTypeByEventID(ctx context.Context, eventID, userID int) (string, error) {
	// Pointer field distinguishes NULL from an empty string.
	type row struct {
		Type *string `gorm:"column:type"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("type").
		Where("id = ? AND user_id=?", eventID, userID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// Event missing or not owned by this user: one unified error.
			return "", respond.WrongScheduleEventID
		}
		return "", err
	}
	if r.Type == nil {
		return "", respond.WrongScheduleEventID
	}
	return *r.Type, nil
}
// GetScheduleEmbeddedTaskID returns one non-empty embedded_task_id from the
// schedules rows of an event, or (0, nil) when the event has no embedded
// task.
func (d *ScheduleDAO) GetScheduleEmbeddedTaskID(ctx context.Context, eventID int) (int, error) {
	// embedded_task_id lives on the schedules table; the first non-null,
	// non-zero value (by id order) is taken as the event's embedded task.
	type row struct {
		EmbeddedTaskID *int `gorm:"column:embedded_task_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("embedded_task_id").
		Where("event_id = ?", eventID).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.EmbeddedTaskID == nil { // no embedded task at all
		return 0, nil
	}
	return *r.EmbeddedTaskID, nil
}
// IfScheduleEventIDExists reports whether a schedule_events row with the
// given id exists.
func (d *ScheduleDAO) IfScheduleEventIDExists(ctx context.Context, eventID int) (bool, error) {
	var total int64
	query := d.db.WithContext(ctx).
		Table("schedule_events").
		Where("id = ?", eventID)
	if err := query.Count(&total).Error; err != nil {
		return false, err
	}
	return total > 0, nil
}
// SetScheduleEmbeddedTaskIDToNull detaches an event's embedded task, clearing
// embedded_task_id on its schedules rows, and returns the task id that was
// embedded. If nothing is embedded it returns
// respond.TargetScheduleNotHaveEmbeddedTask.
func (d *ScheduleDAO) SetScheduleEmbeddedTaskIDToNull(ctx context.Context, eventID int) (int, error) {
	// Look up the currently embedded task id first; no embedding is a
	// business error.
	embeddedTaskID, err := d.GetScheduleEmbeddedTaskID(ctx, eventID)
	if err != nil {
		return 0, err
	}
	if embeddedTaskID == 0 {
		return 0, respond.TargetScheduleNotHaveEmbeddedTask
	}
	// Null out embedded_task_id on the schedules rows of this event to
	// dissolve the embedding.
	res := d.db.WithContext(ctx).
		Table("schedules").
		Where("event_id = ?", eventID).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Update("embedded_task_id", nil)
	if res.Error != nil {
		return 0, res.Error
	}
	if res.RowsAffected == 0 {
		return 0, respond.TargetScheduleNotHaveEmbeddedTask
	}
	return embeddedTaskID, nil
}
// FindEmbeddedTaskIDAndDeleteIt undoes the scheduling of taskID and returns
// the affected event id.
//
// Two cases:
//   - standalone task event: delete its schedules rows, then the event
//     (schedules.event_id is a FK, so child rows go first);
//   - task embedded into a course: keep the course event and slots, only
//     clear embedded_task_id.
//
// Fix: the read-check-delete/update sequence now runs inside one transaction;
// the original issued each statement on the bare handle, so a failure between
// the schedules delete and the event delete could leave orphaned state.
func (d *ScheduleDAO) FindEmbeddedTaskIDAndDeleteIt(ctx context.Context, taskID int) (int, error) {
	// 1. Locate the schedules row embedding this task to recover event_id.
	type row struct {
		EventID *int `gorm:"column:event_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("event_id").
		Where("embedded_task_id = ?", taskID).
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.EventID == nil {
		return 0, respond.TargetTaskNotEmbeddedInAnySchedule
	}
	eventID := *r.EventID
	// 2. Perform the multi-statement undo atomically.
	txErr := d.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		var event model.ScheduleEvent
		if err := tx.Where("id = ?", eventID).First(&event).Error; err != nil {
			return err
		}
		if event.Type == "task" && event.RelID != nil && *event.RelID == taskID {
			// Standalone task event: delete child slots, then the event.
			if err := tx.
				Table("schedules").
				Where("event_id = ?", eventID).
				Delete(&model.Schedule{}).Error; err != nil {
				return err
			}
			res := tx.
				Table("schedule_events").
				Where("id = ?", eventID).
				Delete(&model.ScheduleEvent{})
			if res.Error != nil {
				return res.Error
			}
			if res.RowsAffected == 0 {
				return respond.TargetTaskNotEmbeddedInAnySchedule
			}
			return nil
		}
		// Embedded into a course: only clear the embedded_task_id marker.
		clearRes := tx.
			Table("schedules").
			Where("embedded_task_id = ?", taskID).
			Update("embedded_task_id", nil)
		if clearRes.Error != nil {
			return clearRes.Error
		}
		if clearRes.RowsAffected == 0 {
			return respond.TargetTaskNotEmbeddedInAnySchedule
		}
		return nil
	})
	if txErr != nil {
		return 0, txErr
	}
	return eventID, nil
}
// DeleteScheduleEventByTaskItemID deletes the formal events (type=task,
// rel_id=taskItemID) together with their schedules rows. Idempotent: if the
// events were already removed by a previous step, it succeeds silently.
//
// Fix: the lookup and the two deletes now run inside one transaction; the
// original ran them on the bare handle, so a failure after the child-table
// delete could strand parentless schedules state.
func (d *ScheduleDAO) DeleteScheduleEventByTaskItemID(ctx context.Context, taskItemID int) error {
	return d.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		// 1. Find the matching formal events.
		var eventIDs []int
		if err := tx.
			Table("schedule_events").
			Where("type = ? AND rel_id = ?", "task", taskItemID).
			Pluck("id", &eventIDs).Error; err != nil {
			return err
		}
		if len(eventIDs) == 0 {
			return nil
		}
		// 2. schedules.event_id references schedule_events.id, so the child
		//    table must be cleared before the parent.
		if err := tx.
			Table("schedules").
			Where("event_id IN ?", eventIDs).
			Delete(&model.Schedule{}).Error; err != nil {
			return err
		}
		return tx.
			Table("schedule_events").
			Where("id IN ?", eventIDs).
			Delete(&model.ScheduleEvent{}).Error
	})
}
// GetUserRecentCompletedSchedules pages through the user's already-finished
// slots that are either task events or course slots carrying an embedded
// task, most recently ended first. index/limit are plain offset/limit values.
func (d *ScheduleDAO) GetUserRecentCompletedSchedules(ctx context.Context, nowTime time.Time, userID int, index, limit int) ([]model.Schedule, error) {
	var schedules []model.Schedule
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		// Core filter:
		// 1. belongs to the user and has already ended;
		// 2. the event is itself a task OR it is a course that embeds a task.
		Where("schedules.user_id = ? AND schedule_events.end_time < ? AND (schedule_events.type = ? OR schedules.embedded_task_id IS NOT NULL)",
			userID, nowTime, "task").
		Order("schedule_events.end_time DESC"). // hits the end_time index
		Offset(index).
		Limit(limit).
		Find(&schedules).Error
	if err != nil {
		return nil, err
	}
	return schedules, nil
}
// GetScheduleEventWeekByID returns the week of the first schedules row of an
// event; no row (or a NULL week) maps to respond.WrongScheduleEventID.
func (d *ScheduleDAO) GetScheduleEventWeekByID(ctx context.Context, eventID int) (int, error) {
	// Pointer field: Scan leaves it nil when no row matches.
	type row struct {
		Week *int `gorm:"column:week"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("week").
		Where("event_id = ?", eventID).
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.Week == nil {
		return 0, respond.WrongScheduleEventID
	}
	return *r.Week, nil
}
// GetUserOngoingSchedule returns the user's slots whose events are currently
// running OR start in the future, ordered by start time.
//
// NOTE(review): despite the name, the Or branch deliberately includes events
// that have not started yet — confirm callers expect "ongoing + upcoming".
func (d *ScheduleDAO) GetUserOngoingSchedule(ctx context.Context, userID int, nowTime time.Time) ([]model.Schedule, error) {
	var schedules []model.Schedule
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		// Branch 1: event window covers nowTime (currently running).
		Where("schedules.user_id = ? AND schedule_events.start_time <= ? AND schedule_events.end_time >= ?",
			userID, nowTime, nowTime).
		// Branch 2: event starts after nowTime (upcoming).
		Or("schedules.user_id = ? AND schedule_events.start_time > ?",
			userID, nowTime).
		Order("schedule_events.start_time ASC"). // hits the start_time index
		Find(&schedules).Error
	if err != nil {
		return nil, err
	}
	return schedules, nil
}
// RevocateSchedulesByEventID marks every schedules row of the given event as
// interrupted. (The original comment wrongly said it cleared
// embedded_task_id; the statement updates status.)
//
// Fix: the original inspected RowsAffected before res.Error — a query error
// also leaves RowsAffected at 0, so real failures were masked as
// respond.WrongScheduleEventID. Check the error first.
func (d *ScheduleDAO) RevocateSchedulesByEventID(ctx context.Context, eventID int) error {
	res := d.db.WithContext(ctx).
		Table("schedules").
		Where("event_id = ?", eventID).
		Update("status", "interrupted")
	if res.Error != nil {
		return res.Error
	}
	if res.RowsAffected == 0 {
		return respond.WrongScheduleEventID
	}
	return nil
}
// GetRelIDByScheduleEventID returns the rel_id of a schedule event. A missing
// row maps to respond.WrongScheduleEventID; a NULL rel_id yields (0, nil).
func (d *ScheduleDAO) GetRelIDByScheduleEventID(ctx context.Context, eventID int) (int, error) {
	// Pointer field distinguishes a NULL rel_id from a real zero.
	type row struct {
		RelID *int `gorm:"column:rel_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("rel_id").
		Where("id = ?", eventID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, respond.WrongScheduleEventID
		}
		return 0, err
	}
	if r.RelID == nil {
		return 0, nil
	}
	return *r.RelID, nil
}
// GetUserSchedulesByTimeRange returns the user's slots whose events fall
// completely inside [startTime, endTime], ordered by event start time, with
// Event and EmbeddedTask preloaded.
func (d *ScheduleDAO) GetUserSchedulesByTimeRange(ctx context.Context, userID int, startTime, endTime time.Time) ([]model.Schedule, error) {
	var result []model.Schedule
	query := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		Where("schedules.user_id = ? AND schedule_events.start_time >= ? AND schedule_events.end_time <= ?",
			userID, startTime, endTime).
		Order("schedule_events.start_time ASC") // hits the start_time index
	if err := query.Find(&result).Error; err != nil {
		return nil, err
	}
	return result, nil
}
// BatchEmbedTaskIntoSchedule stamps taskItemIDs[i] into every schedules row
// of eventIDs[i], skipping events that are not of type "course". The two
// slices must be equal length.
//
// NOTE(review): the per-event statements run on the bare handle, not a single
// transaction — a mid-loop failure leaves earlier writes committed; confirm
// callers wrap this via WithTx when atomicity matters.
func (d *ScheduleDAO) BatchEmbedTaskIntoSchedule(ctx context.Context, eventIDs, taskItemIDs []int) error {
	if len(eventIDs) == 0 {
		return nil
	}
	if len(eventIDs) != len(taskItemIDs) {
		return fmt.Errorf("eventIDs length != taskItemIDs length")
	}
	db := d.db.WithContext(ctx)
	for i, eventID := range eventIDs {
		taskItemID := taskItemIDs[i]
		// 1) Verify the event is a course; anything else is skipped.
		var typ string
		if err := db.
			Table("schedule_events").
			Select("type").
			Where("id = ?", eventID).
			Scan(&typ).Error; err != nil {
			return err
		}
		if typ != "course" {
			continue
		}
		// 2) One event owns many schedules rows; stamp embedded_task_id on
		//    all of them in one UPDATE.
		if err := db.
			Table("schedules").
			Where("event_id = ?", eventID).
			Update("embedded_task_id", taskItemID).Error; err != nil {
			return err
		}
	}
	return nil
}
// InsertScheduleEvents bulk-inserts schedule events and returns their new IDs
// in input order; an empty input is a no-op yielding (nil, nil).
func (d *ScheduleDAO) InsertScheduleEvents(ctx context.Context, events []model.ScheduleEvent) ([]int, error) {
	if len(events) == 0 {
		return nil, nil
	}
	if err := d.db.WithContext(ctx).Create(&events).Error; err != nil {
		return nil, err
	}
	ids := make([]int, 0, len(events))
	for i := range events {
		ids = append(ids, events[i].ID)
	}
	return ids, nil
}

View File

@@ -0,0 +1,346 @@
package dao
import (
"context"
"errors"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/LoveLosita/smartflow/backend/shared/respond"
"gorm.io/gorm"
)
// TaskClassDAO provides persistence for task classes and their items.
type TaskClassDAO struct {
	// db holds the database connection (or transaction) this DAO runs on.
	db *gorm.DB
}
// NewTaskClassDAO creates a TaskClassDAO bound to the given *gorm.DB handle.
func NewTaskClassDAO(db *gorm.DB) *TaskClassDAO {
	return &TaskClassDAO{
		db: db,
	}
}
// WithTx returns a TaskClassDAO bound to the given transaction handle.
func (dao *TaskClassDAO) WithTx(tx *gorm.DB) *TaskClassDAO {
	return &TaskClassDAO{db: tx}
}
// AddOrUpdateTaskClass inserts (ID == 0) or updates a task class for userID.
// Updates are scoped by id + user_id, so one user can never overwrite
// another user's row.
func (dao *TaskClassDAO) AddOrUpdateTaskClass(userID int, taskClass *model.TaskClass) (int, error) {
	// Never trust the UserID in the payload; force the logged-in user.
	taskClass.UserID = &userID
	// Insert path: ID == 0 means a brand-new row.
	if taskClass.ID == 0 {
		if err := dao.db.Create(taskClass).Error; err != nil {
			return 0, err
		}
		return taskClass.ID, nil
	}
	// Update path: must match both id AND user_id, otherwise no row changes,
	// preventing writes to other users' data.
	// NOTE(review): Updates with a struct skips zero-value fields (gorm
	// semantics) — confirm clearing a field back to ""/0 is never needed here.
	tx := dao.db.Model(&model.TaskClass{}).
		Where("id = ? AND user_id = ?", taskClass.ID, userID).
		Updates(taskClass)
	if tx.Error != nil {
		return 0, tx.Error
	}
	if tx.RowsAffected == 0 {
		// No match: the row does not exist or belongs to someone else.
		return 0, respond.UserTaskClassForbidden
	}
	return taskClass.ID, nil
}
// AddOrUpdateTaskClassItems creates or updates task class items for userID.
//
// Flow:
//  1. verify every referenced task class (category_id) belongs to the user;
//  2. create new items (ID == 0) in one batch; update existing ones scoped to
//     the verified category set so cross-user writes are impossible.
//
// Fix: the original dereferenced it.CategoryID without a nil check, so a
// payload item with a missing category_id panicked. A nil pointer is now
// rejected the same way as a zero id.
func (dao *TaskClassDAO) AddOrUpdateTaskClassItems(userID int, items []model.TaskClassItem) error {
	if len(items) == 0 {
		return nil
	}
	// 1) Collect the distinct category ids, rejecting nil/zero references.
	categoryIDSet := make(map[int]struct{}, len(items))
	var categoryIDs []int
	for _, it := range items {
		if it.CategoryID == nil || *it.CategoryID == 0 {
			return gorm.ErrRecordNotFound
		}
		if _, ok := categoryIDSet[*it.CategoryID]; !ok {
			categoryIDSet[*it.CategoryID] = struct{}{}
			categoryIDs = append(categoryIDs, *it.CategoryID)
		}
	}
	var count int64
	if err := dao.db.Model(&model.TaskClass{}).
		Where("id IN ? AND user_id = ?", categoryIDs, userID).
		Count(&count).Error; err != nil {
		return err
	}
	if count != int64(len(categoryIDs)) {
		return respond.UserTaskClassForbidden
	}
	// 2) Split creates from updates; updates are limited to the verified
	//    categories so they cannot escalate to another user's data.
	var toCreate []model.TaskClassItem
	for _, it := range items {
		if it.ID == 0 {
			toCreate = append(toCreate, it)
			continue
		}
		tx := dao.db.Model(&model.TaskClassItem{}).
			Where("id = ? AND category_id IN ?", it.ID, categoryIDs).
			Updates(map[string]any{
				"category_id": it.CategoryID,
			})
		if tx.Error != nil {
			return tx.Error
		}
		if tx.RowsAffected == 0 {
			return respond.UserTaskClassForbidden
		}
	}
	if len(toCreate) > 0 {
		if err := dao.db.Create(&toCreate).Error; err != nil {
			return err
		}
	}
	return nil
}
// Transaction runs fn inside one database transaction so the service layer
// can reuse it (auto commit/rollback).
// Rules: fn returns nil -> commit; fn returns an error or panics -> rollback.
func (dao *TaskClassDAO) Transaction(fn func(txDAO *TaskClassDAO) error) error {
	return dao.db.Transaction(func(tx *gorm.DB) error {
		return fn(NewTaskClassDAO(tx))
	})
}
// GetUserTaskClasses lists every task class owned by the given user.
func (dao *TaskClassDAO) GetUserTaskClasses(userID int) ([]model.TaskClass, error) {
	var classes []model.TaskClass
	if err := dao.db.Where("user_id = ?", userID).Find(&classes).Error; err != nil {
		return nil, err
	}
	return classes, nil
}
// GetCompleteTaskClassByID fetches a task class with its Items, filtering by
// both id and userID to prevent cross-user access.
func (dao *TaskClassDAO) GetCompleteTaskClassByID(ctx context.Context, id int, userID int) (*model.TaskClass, error) {
	var taskClass model.TaskClass
	// Preload("Items") issues two statements and assembles the result:
	//   SQL A: SELECT * FROM task_classes WHERE id = ? AND user_id = ?
	//   SQL B: SELECT * FROM task_class_items WHERE category_id = (A's id)
	err := dao.db.WithContext(ctx).
		Preload("Items").
		Where("id = ? AND user_id = ?", id, userID).
		First(&taskClass).Error
	if err != nil {
		return nil, err
	}
	return &taskClass, nil
}
// GetCompleteTaskClassesByIDs batch-fetches "complete" task classes (with
// Items).
//
// Scope:
//  1. filters by user_id + ids so data ownership is enforced;
//  2. preloads Items for direct use by the smart coarse ranking;
//  3. does not order results — ordering policy belongs to the service layer;
//  4. returns WrongTaskClassID if any id is missing or owned by another user.
func (dao *TaskClassDAO) GetCompleteTaskClassesByIDs(ctx context.Context, userID int, ids []int) ([]model.TaskClass, error) {
	if len(ids) == 0 {
		return []model.TaskClass{}, nil
	}
	// 1. Dedupe and drop invalid ids first so junk input cannot amplify DB
	//    load.
	uniqueIDs := make([]int, 0, len(ids))
	seen := make(map[int]struct{}, len(ids))
	for _, id := range ids {
		if id <= 0 {
			continue
		}
		if _, exists := seen[id]; exists {
			continue
		}
		seen[id] = struct{}{}
		uniqueIDs = append(uniqueIDs, id)
	}
	if len(uniqueIDs) == 0 {
		return nil, respond.WrongTaskClassID
	}
	// 2. Batch query with Items preloaded.
	var taskClasses []model.TaskClass
	err := dao.db.WithContext(ctx).
		Preload("Items").
		Where("user_id = ? AND id IN ?", userID, uniqueIDs).
		Find(&taskClasses).Error
	if err != nil {
		return nil, err
	}
	// 3. Count check: even one missing row means an invalid/foreign id, and
	//    the whole call fails with the business error.
	if len(taskClasses) != len(uniqueIDs) {
		return nil, respond.WrongTaskClassID
	}
	return taskClasses, nil
}
// GetTaskClassItemByID fetches a single task-class item by primary key.
func (dao *TaskClassDAO) GetTaskClassItemByID(ctx context.Context, id int) (*model.TaskClassItem, error) {
	var row model.TaskClassItem
	if err := dao.db.WithContext(ctx).Where("id = ?", id).First(&row).Error; err != nil {
		return nil, err
	}
	return &row, nil
}
// GetTaskClassIDByTaskItemID resolves the owning task-class id for a task item.
//
// Return semantics:
//  1. respond.TaskClassItemNotFound when the item row does not exist;
//  2. an internal error when the item exists but category_id is NULL (data
//     inconsistency) — the previous code dereferenced the nil pointer and
//     panicked, crashing the whole request;
//  3. any other database error is passed through unchanged.
func (dao *TaskClassDAO) GetTaskClassIDByTaskItemID(ctx context.Context, itemID int) (int, error) {
	var item model.TaskClassItem
	res := dao.db.WithContext(ctx).
		Select("category_id").
		Where("id = ?", itemID).
		First(&item)
	if res.Error != nil {
		if errors.Is(res.Error, gorm.ErrRecordNotFound) {
			return 0, respond.TaskClassItemNotFound
		}
		return 0, res.Error
	}
	// Guard against NULL category_id before dereferencing.
	if item.CategoryID == nil {
		return 0, errors.New("task class item has no category_id")
	}
	return *item.CategoryID, nil
}
// GetTaskClassUserIDByID returns the owner user id of the given task class.
//
// Return semantics:
//  1. gorm.ErrRecordNotFound is passed through unchanged when the class does
//     not exist, so existing callers keep their behavior;
//  2. an internal error when user_id is NULL (data inconsistency) — the old
//     code dereferenced the nil pointer and panicked;
//  3. any other database error unchanged.
func (dao *TaskClassDAO) GetTaskClassUserIDByID(ctx context.Context, taskClassID int) (int, error) {
	var taskClass model.TaskClass
	err := dao.db.WithContext(ctx).
		Select("user_id").
		Where("id = ?", taskClassID).
		First(&taskClass).Error
	if err != nil {
		return 0, err
	}
	// Guard against NULL user_id before dereferencing.
	if taskClass.UserID == nil {
		return 0, errors.New("task class has no user_id")
	}
	return *taskClass.UserID, nil
}
// UpdateTaskClassItemEmbeddedTime writes a new embedded_time value for the item.
func (dao *TaskClassDAO) UpdateTaskClassItemEmbeddedTime(ctx context.Context, taskID int, embeddedTime *model.TargetTime) error {
	return dao.db.WithContext(ctx).
		Model(&model.TaskClassItem{}).
		Where("id = ?", taskID).
		Update("embedded_time", embeddedTime).Error
}
// DeleteTaskClassItemEmbeddedTime clears the embedded_time column (sets NULL).
func (dao *TaskClassDAO) DeleteTaskClassItemEmbeddedTime(ctx context.Context, taskID int) error {
	return dao.db.WithContext(ctx).
		Model(&model.TaskClassItem{}).
		Where("id = ?", taskID).
		Update("embedded_time", nil).Error
}
// IfTaskClassItemArranged reports whether the item already has an
// embedded_time (i.e. it has been placed on the schedule).
func (dao *TaskClassDAO) IfTaskClassItemArranged(ctx context.Context, taskID int) (bool, error) {
	var row model.TaskClassItem
	if err := dao.db.WithContext(ctx).
		Select("embedded_time").
		Where("id = ?", taskID).
		First(&row).Error; err != nil {
		return false, err
	}
	arranged := row.EmbeddedTime != nil
	return arranged, nil
}
// BatchCheckIfTaskClassItemsArranged reports whether ANY of the given items
// already has an embedded_time. An empty id list is trivially false.
func (dao *TaskClassDAO) BatchCheckIfTaskClassItemsArranged(ctx context.Context, itemIDs []int) (bool, error) {
	if len(itemIDs) == 0 {
		return false, nil
	}
	var arranged int64
	if err := dao.db.WithContext(ctx).
		Model(&model.TaskClassItem{}).
		Where("id IN ? AND embedded_time IS NOT NULL", itemIDs).
		Count(&arranged).Error; err != nil {
		return false, err
	}
	return arranged > 0, nil
}
// DeleteTaskClassItemByID hard-deletes a single task-class item by id.
func (dao *TaskClassDAO) DeleteTaskClassItemByID(ctx context.Context, id int) error {
	return dao.db.WithContext(ctx).
		Where("id = ?", id).
		Delete(&model.TaskClassItem{}).Error
}
// DeleteTaskClassByID hard-deletes a task class row. RowsAffected==0 is
// surfaced as WrongTaskClassID so callers receive a business-level error.
func (dao *TaskClassDAO) DeleteTaskClassByID(ctx context.Context, id int) error {
	result := dao.db.WithContext(ctx).
		Where("id = ?", id).
		Delete(&model.TaskClass{})
	switch {
	case result.Error != nil:
		return result.Error
	case result.RowsAffected == 0:
		return respond.WrongTaskClassID
	default:
		return nil
	}
}
// BatchUpdateTaskClassItemEmbeddedTime updates embedded_time for many items
// with one statement:
//
//	UPDATE ... SET embedded_time = CASE id WHEN ? THEN ? ... END WHERE id IN (?)
//
// itemIDs and updates are positional pairs and must have equal length.
func (dao *TaskClassDAO) BatchUpdateTaskClassItemEmbeddedTime(ctx context.Context, itemIDs []int, updates []*model.TargetTime) error {
	if len(itemIDs) == 0 {
		return nil
	}
	if len(itemIDs) != len(updates) {
		return errors.New("itemIDs length mismatch updates length")
	}
	caseExpr := "CASE id"
	caseArgs := make([]any, 0, 2*len(itemIDs))
	for idx, itemID := range itemIDs {
		caseExpr += " WHEN ? THEN ?"
		caseArgs = append(caseArgs, itemID, updates[idx])
	}
	caseExpr += " END"
	return dao.db.WithContext(ctx).
		Model(&model.TaskClassItem{}).
		Where("id IN ?", itemIDs).
		Update("embedded_time", gorm.Expr(caseExpr, caseArgs...)).Error
}
// ValidateTaskItemIDsBelongToTaskClass reports whether every given item id
// belongs to the given task class.
//
// Fix: the previous version compared COUNT(*) against len(itemIDs) directly,
// so a duplicated (or non-positive) id in the input made the count smaller
// than the slice length and produced a false negative for perfectly valid
// items. IDs are now deduplicated and filtered before the comparison.
func (dao *TaskClassDAO) ValidateTaskItemIDsBelongToTaskClass(ctx context.Context, taskClassID int, itemIDs []int) (bool, error) {
	// An empty request is vacuously valid (original behavior).
	if len(itemIDs) == 0 {
		return true, nil
	}
	// Dedupe and drop non-positive ids so the count comparison is exact.
	seen := make(map[int]struct{}, len(itemIDs))
	unique := make([]int, 0, len(itemIDs))
	for _, id := range itemIDs {
		if id <= 0 {
			continue
		}
		if _, dup := seen[id]; dup {
			continue
		}
		seen[id] = struct{}{}
		unique = append(unique, id)
	}
	// Only invalid ids were supplied: nothing can belong to the class.
	if len(unique) == 0 {
		return false, nil
	}
	var count int64
	err := dao.db.WithContext(ctx).
		Model(&model.TaskClassItem{}).
		Where("id IN ? AND category_id = ?", unique, taskClassID).
		Count(&count).Error
	if err != nil {
		return false, err
	}
	return count == int64(len(unique)), nil
}
// GetTaskClassItemsByIDs batch-fetches items by primary keys; missing ids are
// simply absent from the result (no error).
func (dao *TaskClassDAO) GetTaskClassItemsByIDs(ctx context.Context, itemIDs []int) ([]model.TaskClassItem, error) {
	var rows []model.TaskClassItem
	if err := dao.db.WithContext(ctx).
		Where("id IN ?", itemIDs).
		Find(&rows).Error; err != nil {
		return nil, err
	}
	return rows, nil
}

View File

@@ -0,0 +1,341 @@
package dao
import (
"context"
"errors"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
"github.com/LoveLosita/smartflow/backend/shared/respond"
"gorm.io/gorm"
)
// TaskDAO is the data-access object for task rows.
type TaskDAO struct {
	// db holds the shared database handle (or a transaction handle when the
	// DAO was derived via WithTx).
	db *gorm.DB
}
// NewTaskDAO wraps the given *gorm.DB in a ready-to-use TaskDAO.
func NewTaskDAO(db *gorm.DB) *TaskDAO {
	return &TaskDAO{db: db}
}
// WithTx returns a new TaskDAO bound to the given transaction handle so every
// DAO method can be reused inside an ongoing transaction.
//
// Consistency fix: the receiver is renamed from `r` to `dao` to match every
// other method on this type.
func (dao *TaskDAO) WithTx(tx *gorm.DB) *TaskDAO {
	return &TaskDAO{db: tx}
}
// AddTask inserts a new task row and echoes the persisted record back
// (GORM fills generated fields such as the primary key into req).
func (dao *TaskDAO) AddTask(req *model.Task) (*model.Task, error) {
	err := dao.db.Create(req).Error
	if err != nil {
		return nil, err
	}
	return req, nil
}
// GetTasksByUserID lists every task belonging to userID. An empty result is
// reported as the business error respond.UserTasksEmpty instead of a slice.
func (dao *TaskDAO) GetTasksByUserID(userID int) ([]model.Task, error) {
	var tasks []model.Task
	err := dao.db.Where("user_id = ?", userID).Find(&tasks).Error
	if err != nil {
		return nil, err
	}
	if len(tasks) == 0 {
		// Business-level signal: the user simply has no tasks yet.
		return nil, respond.UserTasksEmpty
	}
	return tasks, nil
}
// CompleteTaskByID marks the given task as "completed".
//
// Responsibilities:
//  1. Only updates the completion state for "current user + given task_id";
//  2. Idempotency middleware is mounted at the routing layer, not here;
//  3. Response wrapping is handled by the Service layer.
//
// Return semantics:
//  1. *model.Task: snapshot of the task after the update (at least
//     ID/UserID/IsCompleted);
//  2. bool:
//     2.1 true:  the task was already completed — an idempotent hit;
//     2.2 false: the task was actually flipped from incomplete to completed;
//  3. error:
//     3.1 gorm.ErrRecordNotFound: task missing or not owned by this user;
//     3.2 anything else: database failure.
func (dao *TaskDAO) CompleteTaskByID(ctx context.Context, userID int, taskID int) (*model.Task, bool, error) {
	// 1. Guard: invalid arguments map straight to "record not found" so no
	//    downstream write can be attempted.
	if userID <= 0 || taskID <= 0 {
		return nil, false, gorm.ErrRecordNotFound
	}
	// 2. Load the task first to distinguish "already completed" from "missing".
	var target model.Task
	findErr := dao.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&target).Error
	if findErr != nil {
		return nil, false, findErr
	}
	// 3. Already completed: report idempotent success without touching the DB.
	if target.IsCompleted {
		return &target, true, nil
	}
	// 4. Not yet completed: flip the state.
	//
	// 4.1 Model(&model.Task{UserID: userID}) lets the cache_deleter GORM
	//     update callback see user_id and evict this user's task cache.
	// 4.2 The WHERE clause still pins user_id + id so no other user's row
	//     can be touched.
	updateResult := dao.db.WithContext(ctx).
		Model(&model.Task{UserID: userID}).
		Where("id = ? AND user_id = ?", taskID, userID).
		Update("is_completed", true)
	if updateResult.Error != nil {
		return nil, false, updateResult.Error
	}
	// 5. Extreme-concurrency fallback:
	// 5.1 RowsAffected=0 usually means a concurrent request won the race;
	// 5.2 re-read the row: completed means idempotent success, otherwise the
	//     update genuinely failed.
	if updateResult.RowsAffected == 0 {
		var check model.Task
		checkErr := dao.db.WithContext(ctx).
			Where("id = ? AND user_id = ?", taskID, userID).
			First(&check).Error
		if checkErr != nil {
			return nil, false, checkErr
		}
		if check.IsCompleted {
			return &check, true, nil
		}
		return nil, false, errors.New("任务状态更新失败")
	}
	// 6. Hand the updated snapshot to the Service layer for the response.
	target.IsCompleted = true
	return &target, false, nil
}
// UndoCompleteTaskByID reverts a task from "completed" back to "incomplete".
//
// Responsibilities:
//  1. Only touches the given task_id under the current user_id;
//  2. If the task is not completed, a business error is returned by design —
//     there is no idempotent success here;
//  3. Response wording is assembled by the Service layer, not here.
//
// Return semantics:
//  1. *model.Task: snapshot of the task after restoration;
//  2. error:
//     2.1 gorm.ErrRecordNotFound: task missing or not owned by this user;
//     2.2 respond.TaskNotCompleted: the task is not currently "completed",
//         so the un-check operation is rejected;
//     2.3 anything else: database failure.
func (dao *TaskDAO) UndoCompleteTaskByID(ctx context.Context, userID int, taskID int) (*model.Task, error) {
	// 1. Guard: invalid user/task arguments map to "record not found" so
	//    nothing gets written by mistake.
	if userID <= 0 || taskID <= 0 {
		return nil, gorm.ErrRecordNotFound
	}
	// 2. Load first, to distinguish "missing" from "state does not allow undo".
	var target model.Task
	findErr := dao.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&target).Error
	if findErr != nil {
		return nil, findErr
	}
	// 3. Strict business rule: an incomplete task cannot be "un-completed".
	// 3.1 Key difference from the "mark complete" endpoint: no idempotent
	//     success here.
	if !target.IsCompleted {
		return nil, respond.TaskNotCompleted
	}
	// 4. Flip the state back (is_completed=true -> false).
	//
	// 4.1 Model(&model.Task{UserID: userID}) lets the cache_deleter callback
	//     read user_id and evict this user's task cache.
	updateResult := dao.db.WithContext(ctx).
		Model(&model.Task{UserID: userID}).
		Where("id = ? AND user_id = ?", taskID, userID).
		Update("is_completed", false)
	if updateResult.Error != nil {
		return nil, updateResult.Error
	}
	// 5. Concurrency fallback:
	// 5.1 RowsAffected=0 means a concurrent request may have reverted it first;
	// 5.2 re-read the state: if it is now incomplete, report TaskNotCompleted
	//     per the business rule.
	if updateResult.RowsAffected == 0 {
		var check model.Task
		checkErr := dao.db.WithContext(ctx).
			Where("id = ? AND user_id = ?", taskID, userID).
			First(&check).Error
		if checkErr != nil {
			return nil, checkErr
		}
		if !check.IsCompleted {
			return nil, respond.TaskNotCompleted
		}
		return nil, errors.New("取消任务完成状态失败")
	}
	// 6. Backfill the restored state and return.
	target.IsCompleted = false
	return &target, nil
}
// PromoteTaskUrgencyByIDs batch-promotes tasks across the urgency boundary.
//
// Responsibilities:
//  1. Moves qualifying tasks from a non-urgent quadrant to the urgent one:
//     1.1 priority=2 -> 1 (important & not urgent -> important & urgent);
//     1.2 priority=4 -> 3 (easy & unimportant -> simple & unimportant);
//  2. Only updates rows within the given user_id + task_ids;
//  3. Event publishing, retry dedupe and cache strategy are handled by
//     Service/Outbox, not here.
//
// Idempotency / consistency:
//  1. The SQL restricts to is_completed=0, urgency_threshold_at<=now and
//     priority IN (2,4);
//  2. Re-running the same batch leaves already-promoted rows untouched
//     (idempotent);
//  3. Model(&model.Task{UserID: userID}) feeds user_id to the GORM callback
//     so cache_deleter can evict the task cache.
func (dao *TaskDAO) PromoteTaskUrgencyByIDs(ctx context.Context, userID int, taskIDs []int, now time.Time) (int64, error) {
	// 1. Guard: an invalid user or an empty batch is a no-op.
	if userID <= 0 || len(taskIDs) == 0 {
		return 0, nil
	}
	// 2. Dedupe and drop non-positive ids to keep the WHERE IN clause clean.
	validTaskIDs := compactPositiveIntIDs(taskIDs)
	if len(validTaskIDs) == 0 {
		return 0, nil
	}
	// 3. Conditional update: only rows past the urgency threshold and still in
	//    a non-urgent quadrant are promoted.
	result := dao.db.WithContext(ctx).
		Model(&model.Task{UserID: userID}).
		Where("user_id = ?", userID).
		Where("id IN ?", validTaskIDs).
		Where("is_completed = ?", false).
		Where("urgency_threshold_at IS NOT NULL AND urgency_threshold_at <= ?", now).
		Where("priority IN ?", []int{2, 4}).
		Update("priority", gorm.Expr("CASE WHEN priority = 2 THEN 1 WHEN priority = 4 THEN 3 ELSE priority END"))
	if result.Error != nil {
		return 0, result.Error
	}
	return result.RowsAffected, nil
}
// UpdateTaskByID applies a partial update (SET clause built from updates) to
// the task addressed by task_id + user_id.
//
// Responsibilities:
//  1. Only executes the SET derived from the updates map;
//  2. Business validation (e.g. priority range) lives in the Service layer;
//  3. Model(&model.Task{UserID: userID}) gives the cache_deleter callback the
//     user_id it needs.
//
// Return semantics:
//  1. *model.Task: the complete task snapshot after the update;
//  2. error:
//     2.1 gorm.ErrRecordNotFound: task missing or not owned by this user;
//     2.2 anything else: database failure.
func (dao *TaskDAO) UpdateTaskByID(ctx context.Context, userID int, taskID int, updates map[string]interface{}) (*model.Task, error) {
	// 1. Guard: invalid arguments map to "record not found".
	if userID <= 0 || taskID <= 0 {
		return nil, gorm.ErrRecordNotFound
	}
	// 2. Confirm the row exists and belongs to this user.
	var target model.Task
	findErr := dao.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&target).Error
	if findErr != nil {
		return nil, findErr
	}
	// 3. Execute the partial update.
	// 3.1 Model(&model.Task{UserID: userID}) triggers cache_deleter.
	// 3.2 id + user_id in the WHERE clause prevents cross-user writes.
	updateResult := dao.db.WithContext(ctx).
		Model(&model.Task{UserID: userID}).
		Where("id = ? AND user_id = ?", taskID, userID).
		Updates(updates)
	if updateResult.Error != nil {
		return nil, updateResult.Error
	}
	// 4. Re-read after the write so the caller receives a complete, consistent
	//    snapshot rather than a partially-populated struct.
	var updated model.Task
	if err := dao.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&updated).Error; err != nil {
		return nil, err
	}
	return &updated, nil
}
// DeleteTaskByID permanently removes the given task (hard delete).
//
// Responsibilities:
//  1. Only deletes the row matching user_id + task_id;
//  2. Model(&model.Task{UserID: userID}) triggers cache_deleter to evict this
//     user's task cache;
//  3. No cascading schedule cleanup (tasks and schedule_events have no direct
//     foreign-key relation).
//
// Return semantics:
//  1. *model.Task: snapshot of the deleted task, used for the frontend
//     response;
//  2. error:
//     2.1 gorm.ErrRecordNotFound: task missing or not owned by this user;
//     2.2 anything else: database failure.
func (dao *TaskDAO) DeleteTaskByID(ctx context.Context, userID int, taskID int) (*model.Task, error) {
	// 1. Guard against invalid arguments.
	if userID <= 0 || taskID <= 0 {
		return nil, gorm.ErrRecordNotFound
	}
	// 2. Load first: confirms existence/ownership and captures the snapshot
	//    returned to the caller.
	var target model.Task
	findErr := dao.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&target).Error
	if findErr != nil {
		return nil, findErr
	}
	// 3. Execute the hard delete.
	// 3.1 Model(&model.Task{UserID: userID}) triggers cache_deleter.
	deleteResult := dao.db.WithContext(ctx).
		Model(&model.Task{UserID: userID}).
		Where("id = ? AND user_id = ?", taskID, userID).
		Delete(&model.Task{})
	if deleteResult.Error != nil {
		return nil, deleteResult.Error
	}
	// 4. Concurrency fallback: RowsAffected=0 means a concurrent request
	//    deleted the row first.
	if deleteResult.RowsAffected == 0 {
		return nil, gorm.ErrRecordNotFound
	}
	return &target, nil
}
// compactPositiveIntIDs removes duplicates and non-positive values from ids.
//
// Notes:
//  1. Pure parameter-cleaning helper for the DAO; it applies no business rule;
//  2. Output order follows first occurrence, which the current WHERE IN usage
//     does not depend on.
func compactPositiveIntIDs(ids []int) []int {
	out := make([]int, 0, len(ids))
	seen := make(map[int]struct{}, len(ids))
	for _, candidate := range ids {
		if candidate <= 0 {
			continue
		}
		if _, dup := seen[candidate]; dup {
			continue
		}
		seen[candidate] = struct{}{}
		out = append(out, candidate)
	}
	return out
}

View File

@@ -0,0 +1,85 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"gorm.io/gorm"
)
// ActiveScheduleTriggeredProcessor describes the minimal capability the
// active_schedule.triggered worker needs to run the real business logic.
//
// Responsibilities:
//  1. ProcessTriggeredInTx orchestrates trigger -> preview -> notification
//     inside a single transaction;
//  2. MarkTriggerFailedBestEffort writes the failure back outside the
//     transaction, so some business state is queryable before the outbox
//     retry kicks in;
//  3. The interface pins no concrete implementation, letting the
//     active_scheduler module evolve independently during the migration.
type ActiveScheduleTriggeredProcessor interface {
	ProcessTriggeredInTx(ctx context.Context, tx *gorm.DB, payload sharedevents.ActiveScheduleTriggeredPayload) error
	MarkTriggerFailedBestEffort(ctx context.Context, triggerID string, err error)
}
// RegisterActiveScheduleTriggeredHandler registers the outbox handler for
// active_schedule.triggered.
//
// Steps:
//  1. Parse the envelope into the contract DTO and validate the version;
//     obviously-broken messages are marked dead immediately;
//  2. ConsumeAndMarkConsumed keeps "business write + consumed advance" inside
//     one transaction;
//  3. On a transaction error, best-effort mark the trigger as failed, then
//     hand the error back to outbox for retry;
//  4. The concrete active_scheduler implementation is not imported here, so
//     service/events and the orchestration layer do not couple in reverse.
func RegisterActiveScheduleTriggeredHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	processor ActiveScheduleTriggeredProcessor,
) error {
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if processor == nil {
		return errors.New("active schedule triggered processor is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, sharedevents.ActiveScheduleTriggeredEventType)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		// Unsupported versions and undecodable/invalid payloads are
		// unrecoverable: mark dead and swallow, so outbox does not retry.
		if !isAllowedTriggeredEventVersion(envelope.EventVersion) {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, fmt.Sprintf("active_schedule.triggered 版本不受支持: %s", envelope.EventVersion))
			return nil
		}
		var payload sharedevents.ActiveScheduleTriggeredPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析 active_schedule.triggered 载荷失败: "+unmarshalErr.Error())
			return nil
		}
		if validateErr := payload.Validate(); validateErr != nil {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "active_schedule.triggered 载荷非法: "+validateErr.Error())
			return nil
		}
		// Business processing and the consumed marker share one transaction.
		err := eventOutboxRepo.ConsumeAndMarkConsumed(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			return processor.ProcessTriggeredInTx(ctx, tx, payload)
		})
		if err != nil {
			// Best-effort failure write-back; the returned error drives retry.
			processor.MarkTriggerFailedBestEffort(ctx, payload.TriggerID, err)
			return err
		}
		return nil
	}
	return bus.RegisterEventHandler(sharedevents.ActiveScheduleTriggeredEventType, handler)
}
// isAllowedTriggeredEventVersion reports whether the envelope version is
// acceptable: empty (legacy producers) or the current contract version.
func isAllowedTriggeredEventVersion(version string) bool {
	switch strings.TrimSpace(version) {
	case "", sharedevents.ActiveScheduleTriggeredEventVersion:
		return true
	}
	return false
}

View File

@@ -0,0 +1,130 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"log"
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"gorm.io/gorm"
"gorm.io/gorm/clause"
)
const (
	// EventTypeAgentStateSnapshotPersist is the business event type for
	// "persist an agent state snapshot".
	EventTypeAgentStateSnapshotPersist = "agent.state.snapshot.persist"
)
// AgentStateSnapshotPayload is the business payload carried by the outbox
// event.
type AgentStateSnapshotPayload struct {
	ConversationID string `json:"conversation_id"`
	UserID         int    `json:"user_id"`
	Phase          string `json:"phase"`
	SnapshotJSON   string `json:"snapshot_json"`
}
// RegisterAgentStateSnapshotHandler registers the consumer that persists
// agent state snapshots.
//
// Responsibilities:
//  1. Only writes snapshots into the agent_state_snapshot_records table;
//  2. Upsert semantics: one latest snapshot per conversation_id;
//  3. The shared outbox consume transaction keeps "business write + consumed
//     advance" atomic.
//
// NOTE(review): repoManager is only nil-checked here and never used in the
// handler body — confirm whether the dependency can be dropped.
func RegisterAgentStateSnapshotHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
) error {
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if repoManager == nil {
		return errors.New("repo manager is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeAgentStateSnapshotPersist)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		var payload AgentStateSnapshotPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			// Undecodable payloads are unrecoverable: mark dead, no retry.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析快照载荷失败: "+unmarshalErr.Error())
			return nil
		}
		return eventOutboxRepo.ConsumeAndMarkConsumed(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			record := model.AgentStateSnapshotRecord{
				ConversationID: payload.ConversationID,
				UserID:         payload.UserID,
				Phase:          payload.Phase,
				SnapshotJSON:   payload.SnapshotJSON,
			}
			// Upsert on conversation_id so replays simply refresh the row.
			return tx.Clauses(clause.OnConflict{
				Columns:   []clause.Column{{Name: "conversation_id"}},
				DoUpdates: clause.AssignmentColumns([]string{"user_id", "phase", "snapshot_json", "updated_at"}),
			}).Create(&record).Error
		})
	}
	return bus.RegisterEventHandler(EventTypeAgentStateSnapshotPersist, handler)
}
// PublishAgentStateSnapshot publishes an "agent state snapshot persist"
// event to the outbox.
//
// Design notes:
//  1. The snapshot is JSON-serialized and written to MySQL asynchronously via
//     the outbox;
//  2. a nil publisher degrades silently (environments without Kafka);
//  3. publish failures are only logged and never break the main flow.
func PublishAgentStateSnapshot(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	snapshot *agentmodel.AgentStateSnapshot,
	conversationID string,
	userID int,
) {
	// Silent degradation when Kafka/outbox is not wired up.
	if publisher == nil {
		return
	}
	if snapshot == nil {
		return
	}
	snapshotJSON, err := json.Marshal(snapshot)
	if err != nil {
		log.Printf("[WARN] 序列化 agent 状态快照失败 chat=%s: %v", conversationID, err)
		return
	}
	// Extract the phase from the runtime state when present; empty otherwise.
	phase := ""
	if snapshot.RuntimeState != nil {
		cs := snapshot.RuntimeState.EnsureCommonState()
		if cs != nil {
			phase = string(cs.Phase)
		}
	}
	payload := AgentStateSnapshotPayload{
		ConversationID: conversationID,
		UserID:         userID,
		Phase:          phase,
		SnapshotJSON:   string(snapshotJSON),
	}
	// Best-effort publish: log and continue on failure.
	if err := publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    EventTypeAgentStateSnapshotPersist,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   conversationID,
		AggregateID:  conversationID,
		Payload:      payload,
	}); err != nil {
		log.Printf("[WARN] 发布 agent 状态快照事件失败 chat=%s: %v", conversationID, err)
	}
}

View File

@@ -0,0 +1,330 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"fmt"
"log"
"strings"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"gorm.io/gorm"
)
// EventTypeAgentTimelinePersistRequested is the business event type for
// "persist a conversation timeline event".
const EventTypeAgentTimelinePersistRequested = "agent.timeline.persist.requested"
// RegisterAgentTimelinePersistHandler registers the "conversation timeline
// persistence" consumer.
//
// Responsibilities:
//  1. Only handles timeline events — chat_history and other business messages
//     are out of scope;
//  2. Only registers the handler; bus start/stop is managed elsewhere;
//  3. Uses the shared outbox consume transaction so "timeline write + consumed
//     advance" happen atomically;
//  4. On a seq unique-key conflict, first decides whether this is an
//     idempotent replay, and only then allocates a fresh seq and backfills
//     Redis.
func RegisterAgentTimelinePersistHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	agentRepo *dao.AgentDAO,
	cacheDAO *dao.CacheDAO,
) error {
	// 1. Dependency checks: any missing dependency makes safe consumption
	//    impossible.
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if agentRepo == nil {
		return errors.New("agent repo is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeAgentTimelinePersistRequested)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		var payload model.ChatTimelinePersistPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			// 1. An undecodable payload is unrecoverable: mark dead so outbox
			//    does not retry pointlessly.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析时间线持久化载荷失败: "+unmarshalErr.Error())
			return nil
		}
		payload = payload.Normalize()
		if !payload.HasValidIdentity() {
			// 2. Only the minimal field set needed to uniquely address a
			//    timeline row is validated here.
			// 3. Whether content / payload_json may be empty is decided per
			//    event kind, not enforced uniformly here.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "时间线持久化载荷非法: user_id/conversation_id/seq/kind 非法")
			return nil
		}
		refreshCache := false
		finalSeq := payload.Seq
		// 4. Go through the shared outbox consume transaction so "business
		//    write succeeded -> consumed" stays atomic.
		err := eventOutboxRepo.ConsumeAndMarkConsumed(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			finalPayload, repaired, persistErr := persistConversationTimelineEventInTx(ctx, tx, agentRepo.WithTx(tx), payload)
			if persistErr != nil {
				return persistErr
			}
			refreshCache = repaired
			finalSeq = finalPayload.Seq
			return nil
		})
		if err != nil {
			return err
		}
		// 5. The Redis timeline is rebuilt ONLY when a seq conflict forced a
		//    new seq to be allocated:
		// 5.1 the main flow already wrote Redis, so a normal success needs no
		//     second write;
		// 5.2 after a seq repair, a stale cache entry with the old seq would
		//     reorder the timeline on refresh;
		// 5.3 a cache-rebuild failure is only logged — the already-consumed
		//     outbox row must not be rolled back because of it.
		if refreshCache {
			if refreshErr := rebuildConversationTimelineCache(ctx, agentRepo, cacheDAO, payload.UserID, payload.ConversationID, finalSeq); refreshErr != nil {
				log.Printf("重建时间线缓存失败 user=%d chat=%s seq=%d err=%v", payload.UserID, payload.ConversationID, finalSeq, refreshErr)
			}
		}
		return nil
	}
	return bus.RegisterEventHandler(EventTypeAgentTimelinePersistRequested, handler)
}
// PublishAgentTimelinePersistRequested publishes a "conversation timeline
// persist requested" event.
//
// Design goals:
//  1. Callers pass only the DTO and never re-assemble event metadata;
//  2. conversation_id is used as both MessageKey and AggregateID to minimise
//     reordering within a single conversation;
//  3. publish failures are returned explicitly so the caller decides whether
//     to abort the main flow.
func PublishAgentTimelinePersistRequested(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	payload model.ChatTimelinePersistPayload,
) error {
	if publisher == nil {
		return errors.New("event publisher is nil")
	}
	payload = payload.Normalize()
	if !payload.HasValidIdentity() {
		return errors.New("invalid timeline persist payload")
	}
	return publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    EventTypeAgentTimelinePersistRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   payload.ConversationID,
		AggregateID:  payload.ConversationID,
		Payload:      payload,
	})
}
// persistConversationTimelineEventInTx writes one timeline event inside a
// single transaction.
//
// Steps:
//  1. Attempt the insert with the payload's original seq;
//  2. On a seq unique-key conflict, look up the stored row with the same seq
//     to decide whether this is a replay of the same event;
//  3. If it is not a replay but a Redis-seq-drift collision, reassign the seq
//     as max(seq)+1;
//  4. Repair at most 3 times so pathological data cannot trap the consumer
//     in an infinite loop.
//
// Returns the payload actually written (its Seq may have been repaired), a
// flag saying whether a repair happened, and any error.
func persistConversationTimelineEventInTx(
	ctx context.Context,
	tx *gorm.DB,
	agentRepo *dao.AgentDAO,
	payload model.ChatTimelinePersistPayload,
) (model.ChatTimelinePersistPayload, bool, error) {
	if tx == nil {
		return payload, false, errors.New("transaction is nil")
	}
	if agentRepo == nil {
		return payload, false, errors.New("agent repo is nil")
	}
	working := payload.Normalize()
	repaired := false
	for attempt := 0; attempt < 3; attempt++ {
		if _, _, err := agentRepo.SaveConversationTimelineEvent(ctx, working); err == nil {
			return working, repaired, nil
		} else if !model.IsTimelineSeqConflictError(err) {
			return working, repaired, err
		}
		// 1. First decide whether "the same event was consumed twice".
		// 2. If a stored row exists and all fields match, the earlier attempt
		//    actually succeeded — treat this one as idempotent success.
		// 3. Otherwise enter the "allocate new seq" branch so a genuinely new
		//    event is not swallowed.
		existing, findErr := findConversationTimelineEventBySeq(ctx, tx, working.UserID, working.ConversationID, working.Seq)
		if findErr == nil && working.MatchesStoredEvent(existing) {
			return working, repaired, nil
		}
		if findErr != nil && !errors.Is(findErr, gorm.ErrRecordNotFound) {
			return working, repaired, findErr
		}
		maxSeq, maxErr := loadConversationTimelineMaxSeq(ctx, tx, working.UserID, working.ConversationID)
		if maxErr != nil {
			return working, repaired, maxErr
		}
		working.Seq = maxSeq + 1
		repaired = true
	}
	return working, repaired, fmt.Errorf("timeline seq repair exceeded limit user=%d chat=%s", working.UserID, working.ConversationID)
}
// findConversationTimelineEventBySeq fetches the timeline row addressed by
// (user_id, chat_id, seq); gorm.ErrRecordNotFound is returned when absent.
func findConversationTimelineEventBySeq(
	ctx context.Context,
	tx *gorm.DB,
	userID int,
	conversationID string,
	seq int64,
) (model.AgentTimelineEvent, error) {
	var row model.AgentTimelineEvent
	query := tx.WithContext(ctx).
		Where("user_id = ? AND chat_id = ? AND seq = ?", userID, strings.TrimSpace(conversationID), seq)
	err := query.Take(&row).Error
	return row, err
}
// loadConversationTimelineMaxSeq returns the highest seq stored for the
// conversation, or 0 when the timeline is empty.
func loadConversationTimelineMaxSeq(
	ctx context.Context,
	tx *gorm.DB,
	userID int,
	conversationID string,
) (int64, error) {
	var highest int64
	if err := tx.WithContext(ctx).
		Model(&model.AgentTimelineEvent{}).
		Where("user_id = ? AND chat_id = ?", userID, strings.TrimSpace(conversationID)).
		Select("COALESCE(MAX(seq), 0)").
		Scan(&highest).Error; err != nil {
		return 0, err
	}
	return highest, nil
}
// rebuildConversationTimelineCache rebuilds the Redis timeline cache after a
// seq repair.
//
// Notes:
//  1. Only runs when a cache is available; environments without Redis skip it;
//  2. A full rebuild (not a single append) is required because the old cache
//     still contains the event under its wrong seq;
//  3. This is not shared with agent/sv on purpose — events must not depend on
//     the service layer or a dependency cycle would form.
func rebuildConversationTimelineCache(
	ctx context.Context,
	agentRepo *dao.AgentDAO,
	cacheDAO *dao.CacheDAO,
	userID int,
	conversationID string,
	finalSeq int64,
) error {
	if cacheDAO == nil || agentRepo == nil {
		return nil
	}
	events, err := agentRepo.ListConversationTimelineEvents(ctx, userID, conversationID)
	if err != nil {
		return err
	}
	items := buildConversationTimelineCacheItems(events)
	if err = cacheDAO.SetConversationTimelineToCache(ctx, userID, conversationID, items); err != nil {
		return err
	}
	// Track the last cached seq so the seq counter matches the rebuilt list.
	if len(items) > 0 {
		finalSeq = items[len(items)-1].Seq
	}
	return cacheDAO.SetConversationTimelineSeq(ctx, userID, conversationID, finalSeq)
}
// buildConversationTimelineCacheItems converts stored timeline rows into
// cache DTOs and runs them through kind/role normalization.
func buildConversationTimelineCacheItems(events []model.AgentTimelineEvent) []model.GetConversationTimelineItem {
	out := make([]model.GetConversationTimelineItem, 0, len(events))
	if len(events) == 0 {
		return out
	}
	for _, row := range events {
		entry := model.GetConversationTimelineItem{
			ID:             row.ID,
			Seq:            row.Seq,
			Kind:           strings.TrimSpace(row.Kind),
			TokensConsumed: row.TokensConsumed,
			CreatedAt:      row.CreatedAt,
		}
		if row.Role != nil {
			entry.Role = strings.TrimSpace(*row.Role)
		}
		if row.Content != nil {
			entry.Content = strings.TrimSpace(*row.Content)
		}
		if row.Payload != nil {
			// Attach the payload only when it parses into a non-empty JSON
			// object; malformed payloads are silently dropped.
			var parsed map[string]any
			if json.Unmarshal([]byte(strings.TrimSpace(*row.Payload)), &parsed) == nil && len(parsed) > 0 {
				entry.Payload = parsed
			}
		}
		out = append(out, entry)
	}
	return normalizeConversationTimelineCacheItems(out)
}
// normalizeConversationTimelineCacheItems backfills kind and role so cached
// entries stay self-consistent: the kind is canonicalized first, then each
// field can be derived from the other for plain user/assistant text.
func normalizeConversationTimelineCacheItems(items []model.GetConversationTimelineItem) []model.GetConversationTimelineItem {
	result := make([]model.GetConversationTimelineItem, 0, len(items))
	if len(items) == 0 {
		return result
	}
	for _, entry := range items {
		role := strings.ToLower(strings.TrimSpace(entry.Role))
		kind := canonicalizeConversationTimelineKind(entry.Kind, role)
		if kind == "" {
			if role == "user" {
				kind = model.AgentTimelineKindUserText
			} else if role == "assistant" {
				kind = model.AgentTimelineKindAssistantText
			}
		}
		if role == "" {
			if kind == model.AgentTimelineKindUserText {
				role = "user"
			} else if kind == model.AgentTimelineKindAssistantText {
				role = "assistant"
			}
		}
		entry.Kind = kind
		entry.Role = role
		result = append(result, entry)
	}
	return result
}
// canonicalizeConversationTimelineKind maps a raw kind string onto the
// canonical timeline kinds. Legacy aliases ("text"/"message"/"query") are
// resolved via the role; anything else passes through trimmed and lowercased.
func canonicalizeConversationTimelineKind(kind string, role string) string {
	k := strings.ToLower(strings.TrimSpace(kind))
	r := strings.ToLower(strings.TrimSpace(role))
	canonical := []string{
		model.AgentTimelineKindUserText,
		model.AgentTimelineKindAssistantText,
		model.AgentTimelineKindToolCall,
		model.AgentTimelineKindToolResult,
		model.AgentTimelineKindConfirmRequest,
		model.AgentTimelineKindBusinessCard,
		model.AgentTimelineKindScheduleCompleted,
		model.AgentTimelineKindThinkingSummary,
	}
	for _, want := range canonical {
		if k == want {
			return k
		}
	}
	if k == "text" || k == "message" || k == "query" {
		switch r {
		case "user":
			return model.AgentTimelineKindUserText
		case "assistant":
			return model.AgentTimelineKindAssistantText
		}
	}
	return k
}

View File

@@ -0,0 +1,115 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"strconv"
"strings"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"github.com/LoveLosita/smartflow/backend/shared/ports"
"gorm.io/gorm"
)
const (
	// EventTypeChatHistoryPersistRequested is the business event type for a
	// chat-message persistence request.
	EventTypeChatHistoryPersistRequested = "chat.history.persist.requested"
)
// RegisterChatHistoryPersistHandler registers the "persist chat message"
// consumer.
//
// Responsibilities:
//  1. Only handles chat-history events, no other business events;
//  2. Registration only; bus startup is managed elsewhere;
//  3. Writes the local chat tables first, then calls userauth to adjust the
//     token quota;
//  4. Only the new routing key is registered — legacy compatibility keys are
//     gone in this version.
//
// NOTE(review): the local write (ConsumeInTx), the token adjustment and
// MarkConsumed are three separate steps; a failure after ConsumeInTx commits
// triggers an outbox retry that re-runs the whole handler. Presumably
// SaveChatHistoryInTx and AdjustTokenUsage are idempotent on eventID —
// confirm, otherwise retries could duplicate writes.
func RegisterChatHistoryPersistHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	adjuster ports.TokenUsageAdjuster,
) error {
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if repoManager == nil {
		return errors.New("repo manager is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeChatHistoryPersistRequested)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		var payload model.ChatHistoryPersistPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			// Undecodable payloads are unrecoverable: mark dead, no retry.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析聊天持久化载荷失败: "+unmarshalErr.Error())
			return nil
		}
		// Fall back to the outbox id when the envelope carries no event id,
		// so downstream idempotency keys are never empty.
		eventID := strings.TrimSpace(envelope.EventID)
		if eventID == "" {
			eventID = strconv.FormatInt(envelope.OutboxID, 10)
		}
		// Step 1: write chat history inside the outbox consume transaction.
		if err := eventOutboxRepo.ConsumeInTx(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			txM := repoManager.WithTx(tx)
			return txM.Agent.SaveChatHistoryInTx(
				ctx,
				payload.UserID,
				payload.ConversationID,
				payload.Role,
				payload.Message,
				payload.ReasoningContent,
				payload.ReasoningDurationSeconds,
				payload.TokensConsumed,
				eventID,
			)
		}); err != nil {
			return err
		}
		// Step 2: adjust the user's token quota via userauth when tokens were
		// actually consumed.
		if payload.TokensConsumed > 0 {
			if adjuster == nil {
				return errors.New("userauth token adjuster is nil")
			}
			if _, err := adjuster.AdjustTokenUsage(ctx, contracts.AdjustTokenUsageRequest{
				EventID:    eventID,
				UserID:     payload.UserID,
				TokenDelta: payload.TokensConsumed,
			}); err != nil {
				return err
			}
		}
		// Step 3: advance the outbox consumed marker last.
		return eventOutboxRepo.MarkConsumed(ctx, envelope.OutboxID)
	}
	return bus.RegisterEventHandler(EventTypeChatHistoryPersistRequested, handler)
}
// PublishChatHistoryPersistRequested publishes a "chat message persistence
// requested" event. It only guarantees the outbox write and never waits for
// the consumer side.
func PublishChatHistoryPersistRequested(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	payload model.ChatHistoryPersistPayload,
) error {
	if publisher == nil {
		return errors.New("event publisher is nil")
	}
	// Key and aggregate both use the conversation ID so one conversation's
	// messages stay ordered on a single partition.
	req := outboxinfra.PublishRequest{
		EventType:    EventTypeChatHistoryPersistRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   payload.ConversationID,
		AggregateID:  payload.ConversationID,
		Payload:      payload,
	}
	return publisher.Publish(ctx, req)
}

View File

@@ -0,0 +1,126 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"strconv"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"github.com/LoveLosita/smartflow/backend/shared/ports"
"gorm.io/gorm"
)
const (
	// EventTypeChatTokenUsageAdjustRequested is the "conversation token quota
	// adjustment" event type.
	//
	// Naming constraints:
	//  1. Expresses business semantics only; never leaks outbox/kafka details.
	//  2. Kept as a stable routing key long-term; future evolution goes
	//     through event_version first.
	EventTypeChatTokenUsageAdjustRequested = "chat.token.usage.adjust.requested"
)
// RegisterChatTokenUsageAdjustHandler registers the "conversation token quota
// adjustment" consumer.
//
// Responsibilities:
//  1. Handles only token-adjustment events, never chat-body persistence.
//  2. Writes the local ledger first, then syncs the quota on the userauth side.
//  3. Malformed payloads are marked dead immediately to avoid pointless retries.
//
// Ordering note: the local ledger commits inside the consume transaction, the
// userauth sync runs afterwards, and MarkConsumed is last; a failed sync
// therefore redelivers the whole message, with eventID expected to dedupe both
// sides (confirm AdjustTokenUsageInTx / AdjustTokenUsage honor it).
func RegisterChatTokenUsageAdjustHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	adjuster ports.TokenUsageAdjuster,
) error {
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if repoManager == nil {
		return errors.New("repo manager is nil")
	}
	// Narrow the repo to the table/topic owned by this event type.
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeChatTokenUsageAdjustRequested)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		var payload model.ChatTokenUsageAdjustPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			// Unparseable payloads are unrecoverable: dead-letter, do not retry.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析会话 token 调整载荷失败: "+unmarshalErr.Error())
			return nil
		}
		if payload.UserID <= 0 || payload.TokensDelta <= 0 || payload.ConversationID == "" {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "会话 token 调整载荷无效: user_id/conversation_id/tokens_delta 非法")
			return nil
		}
		// Fall back to the outbox row ID so idempotency always has a key.
		eventID := strings.TrimSpace(envelope.EventID)
		if eventID == "" {
			eventID = strconv.FormatInt(envelope.OutboxID, 10)
		}
		if err := eventOutboxRepo.ConsumeInTx(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			txM := repoManager.WithTx(tx)
			return txM.Agent.AdjustTokenUsageInTx(ctx, payload.UserID, payload.ConversationID, payload.TokensDelta, eventID)
		}); err != nil {
			return err
		}
		if adjuster == nil {
			return errors.New("userauth token adjuster is nil")
		}
		if _, err := adjuster.AdjustTokenUsage(ctx, contracts.AdjustTokenUsageRequest{
			EventID:    eventID,
			UserID:     payload.UserID,
			TokenDelta: payload.TokensDelta,
		}); err != nil {
			return err
		}
		return eventOutboxRepo.MarkConsumed(ctx, envelope.OutboxID)
	}
	return bus.RegisterEventHandler(EventTypeChatTokenUsageAdjustRequested, handler)
}
// PublishChatTokenUsageAdjustRequested publishes a "conversation token quota
// adjustment" event.
//
//  1. Only guarantees the outbox row is written; it does not wait for the
//     consumer result.
//  2. Callers deal in DTOs only and never see outbox/Kafka details.
func PublishChatTokenUsageAdjustRequested(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	payload model.ChatTokenUsageAdjustPayload,
) error {
	// Validation order matches the contract: publisher, then payload fields.
	switch {
	case publisher == nil:
		return errors.New("event publisher is nil")
	case payload.UserID <= 0:
		return errors.New("invalid user_id")
	case payload.TokensDelta <= 0:
		return errors.New("invalid tokens_delta")
	case payload.ConversationID == "":
		return errors.New("invalid conversation_id")
	}
	if payload.TriggeredAt.IsZero() {
		payload.TriggeredAt = time.Now()
	}
	aggregate := strconv.Itoa(payload.UserID) + ":" + payload.ConversationID
	return publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    EventTypeChatTokenUsageAdjustRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   payload.ConversationID,
		AggregateID:  aggregate,
		Payload:      payload,
	})
}

View File

@@ -0,0 +1,185 @@
package eventsvc
import (
"errors"
"github.com/LoveLosita/smartflow/backend/services/memory"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"github.com/LoveLosita/smartflow/backend/shared/ports"
)
// RegisterCoreOutboxHandlers registers the outbox handlers still consumed by
// the agent boundary inside the residual monolith.
//
// Responsibilities:
//  1. Only aggregates registration of handlers currently owned by the agent
//     process in the residual monolith.
//  2. Never creates eventBus/outboxRepo/DAO, and never starts or stops the
//     event bus.
//  3. Leaves each Register* function's contract untouched: payload parsing,
//     idempotent consumption and persistence stay in the individual handlers.
//  4. memory.extract.requested moved to cmd/memory in phase 6 CP1; only its
//     route is recorded here, no consumer handler is registered.
func RegisterCoreOutboxHandlers(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
	memoryModule *memory.Module,
	adjuster ports.TokenUsageAdjuster,
) error {
	if err := validateCoreOutboxHandlerDeps(eventBus, outboxRepo, repoManager, agentRepo, cacheRepo); err != nil {
		return err
	}
	// Publish-side routing only: consumption of this event lives in cmd/memory.
	if err := RegisterMemoryExtractRoute(); err != nil {
		return err
	}
	return registerOutboxHandlerRoutes(coreOutboxHandlerRoutes(eventBus, outboxRepo, repoManager, agentRepo, cacheRepo, memoryModule, adjuster))
}
// RegisterAllOutboxHandlers registers every outbox handler of the current phase.
//
// Responsibilities:
//  1. Wires the core / active_scheduler routes of the residual monolith in one pass.
//  2. Never creates dependencies, and never starts the event bus.
//  3. notification moved to cmd/notification with its own outbox consumer, so
//     the monolith no longer registers it.
//  4. Meant to be called by the startup flow before the bus starts, so every
//     route is registered explicitly up front.
func RegisterAllOutboxHandlers(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
	memoryModule *memory.Module,
	activeTriggerWorkflow ActiveScheduleTriggeredProcessor,
	adjuster ports.TokenUsageAdjuster,
) error {
	if err := validateAllOutboxHandlerDeps(eventBus, outboxRepo, repoManager, agentRepo, cacheRepo, memoryModule, activeTriggerWorkflow); err != nil {
		return err
	}
	return registerOutboxHandlerRoutes(allOutboxHandlerRoutes(
		eventBus,
		outboxRepo,
		repoManager,
		agentRepo,
		cacheRepo,
		memoryModule,
		activeTriggerWorkflow,
		adjuster,
	))
}
// validateCoreOutboxHandlerDeps checks the dependencies needed for core outbox
// handler registration.
//
//  1. Performs nil checks only — no DB/Redis/Kafka connectivity probing, so
//     registration never doubles as a startup health check.
//  2. A non-nil error means a dependency is missing; nil means it is safe to
//     proceed with per-route registration.
func validateCoreOutboxHandlerDeps(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
) error {
	switch {
	case eventBus == nil:
		return errors.New("event bus is nil")
	case outboxRepo == nil:
		return errors.New("outbox repository is nil")
	case repoManager == nil:
		return errors.New("repo manager is nil")
	case agentRepo == nil:
		return errors.New("agent repo is nil")
	case cacheRepo == nil:
		return errors.New("cache repo is nil")
	}
	return nil
}
// validateAllOutboxHandlerDeps layers the active_scheduler check on top of the
// core dependency checks. memoryModule is accepted for signature parity but is
// not nil-checked here, matching the current registration behavior.
func validateAllOutboxHandlerDeps(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
	memoryModule *memory.Module,
	activeTriggerWorkflow ActiveScheduleTriggeredProcessor,
) error {
	coreErr := validateCoreOutboxHandlerDeps(eventBus, outboxRepo, repoManager, agentRepo, cacheRepo)
	if coreErr != nil {
		return coreErr
	}
	if activeTriggerWorkflow == nil {
		return errors.New("active schedule triggered processor is nil")
	}
	return nil
}
// coreOutboxHandlerRoutes describes only the core-phase outbox routes.
//
// memoryModule is accepted for signature stability but is unused by the
// current route set (its consumer moved to cmd/memory).
func coreOutboxHandlerRoutes(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
	memoryModule *memory.Module,
	adjuster ports.TokenUsageAdjuster,
) []outboxHandlerRoute {
	// Every route below is owned by the agent service; Register closures only
	// wire handlers, they carry no business logic themselves.
	return []outboxHandlerRoute{
		{
			EventType: EventTypeChatHistoryPersistRequested,
			Service:   outboxHandlerServiceAgent,
			Register: func() error {
				return RegisterChatHistoryPersistHandler(eventBus, outboxRepo, repoManager, adjuster)
			},
		},
		{
			EventType: EventTypeChatTokenUsageAdjustRequested,
			Service:   outboxHandlerServiceAgent,
			Register: func() error {
				return RegisterChatTokenUsageAdjustHandler(eventBus, outboxRepo, repoManager, adjuster)
			},
		},
		{
			EventType: EventTypeAgentStateSnapshotPersist,
			Service:   outboxHandlerServiceAgent,
			Register: func() error {
				return RegisterAgentStateSnapshotHandler(eventBus, outboxRepo, repoManager)
			},
		},
		{
			EventType: EventTypeAgentTimelinePersistRequested,
			Service:   outboxHandlerServiceAgent,
			Register: func() error {
				return RegisterAgentTimelinePersistHandler(eventBus, outboxRepo, agentRepo, cacheRepo)
			},
		},
	}
}
// allOutboxHandlerRoutes expands every outbox route of the current phase so
// the startup entry point can wire them in one pass.
func allOutboxHandlerRoutes(
	eventBus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
	agentRepo *dao.AgentDAO,
	cacheRepo *dao.CacheDAO,
	memoryModule *memory.Module,
	activeTriggerWorkflow ActiveScheduleTriggeredProcessor,
	adjuster ports.TokenUsageAdjuster,
) []outboxHandlerRoute {
	// The active-scheduler route is appended after the core routes so
	// registration order stays: core handlers first, scheduler last.
	activeRoute := outboxHandlerRoute{
		EventType: sharedevents.ActiveScheduleTriggeredEventType,
		Service:   outboxHandlerServiceActiveScheduler,
		Register: func() error {
			return RegisterActiveScheduleTriggeredHandler(eventBus, outboxRepo, activeTriggerWorkflow)
		},
	}
	return append(
		coreOutboxHandlerRoutes(eventBus, outboxRepo, repoManager, agentRepo, cacheRepo, memoryModule, adjuster),
		activeRoute,
	)
}

View File

@@ -0,0 +1,262 @@
package eventsvc
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"strconv"
"strings"
"time"
"github.com/LoveLosita/smartflow/backend/services/memory"
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"github.com/spf13/viper"
"gorm.io/gorm"
)
const (
	// EventTypeMemoryExtractRequested is the "memory extraction requested" event type.
	EventTypeMemoryExtractRequested = "memory.extract.requested"
	// maxMemorySourceTextLength caps the event's source text length, counted
	// in runes (see truncateByRune).
	maxMemorySourceTextLength = 1500
)
// RegisterMemoryExtractRoute records only the service ownership of
// memory.extract.requested.
//
// Responsibilities:
//  1. Guarantees the publish side can write the event into memory_outbox_messages.
//  2. Registers no consumer handler; consumption belongs to cmd/memory since
//     phase 6 CP1.
//  3. Repeated calls follow the idempotent semantics of outbox route registration.
func RegisterMemoryExtractRoute() error {
	return outboxinfra.RegisterEventService(EventTypeMemoryExtractRequested, outboxinfra.ServiceMemory)
}
// RegisterMemoryExtractRequestedHandler registers the "memory extraction
// requested" consumer.
//
// Responsibilities:
//  1. Only converts the event into a memory_jobs task.
//  2. Never runs LLM recomputation inside the consume callback.
//  3. Reuses the memory.Module.WithTx(tx) facade so the transaction boundary
//     stays under outbox control.
func RegisterMemoryExtractRequestedHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	memoryModule *memory.Module,
) error {
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if memoryModule == nil {
		return errors.New("memory module is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeMemoryExtractRequested)
	if err != nil {
		return err
	}
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		var payload model.MemoryExtractRequestedPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			// Unparseable payloads are dead-lettered instead of retried.
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析记忆抽取载荷失败: "+unmarshalErr.Error())
			return nil
		}
		if validateErr := validateMemoryExtractPayload(payload); validateErr != nil {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "记忆抽取载荷非法: "+validateErr.Error())
			return nil
		}
		// Enqueue the job and advance the outbox cursor in one transaction.
		return eventOutboxRepo.ConsumeAndMarkConsumed(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			jobPayload := memorymodel.ExtractJobPayload{
				UserID:          payload.UserID,
				ConversationID:  strings.TrimSpace(payload.ConversationID),
				AssistantID:     strings.TrimSpace(payload.AssistantID),
				RunID:           strings.TrimSpace(payload.RunID),
				SourceMessageID: payload.SourceMessageID,
				SourceRole:      strings.TrimSpace(payload.SourceRole),
				SourceText:      strings.TrimSpace(payload.SourceText),
				OccurredAt:      payload.OccurredAt,
				TraceID:         strings.TrimSpace(payload.TraceID),
				IdempotencyKey:  strings.TrimSpace(payload.IdempotencyKey),
			}
			return memoryModule.WithTx(tx).EnqueueExtract(ctx, jobPayload, envelope.EventID)
		})
	}
	return bus.RegisterEventHandler(EventTypeMemoryExtractRequested, handler)
}
// EnqueueMemoryExtractRequestedInTx writes a memory.extract.requested outbox
// message as part of the caller's transaction.
//
// Design intent:
//  1. "Chat message persisted" and "extract event enqueued" commit in the
//     same transaction.
//  2. Any failure rolls back both, so the pipeline never has a gap.
//
// NOTE(review): no *gorm.DB is passed here, so the transaction is presumably
// carried by outboxRepo itself (e.g. a WithTx-scoped repository) — confirm at
// the call sites.
//
// Behavior visible below: returns nil without writing when memory writes are
// disabled or when the chat payload does not qualify (non-user role or empty
// text); maxRetry <= 0 falls back to 20 attempts.
func EnqueueMemoryExtractRequestedInTx(
	ctx context.Context,
	outboxRepo *outboxinfra.Repository,
	maxRetry int,
	chatPayload model.ChatHistoryPersistPayload,
) error {
	if !isMemoryWriteEnabled() {
		return nil
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	memoryPayload, shouldEnqueue := buildMemoryExtractPayloadFromChat(chatPayload)
	if !shouldEnqueue {
		return nil
	}
	payloadJSON, err := json.Marshal(memoryPayload)
	if err != nil {
		return err
	}
	if maxRetry <= 0 {
		maxRetry = 20
	}
	outboxPayload := outboxinfra.OutboxEventPayload{
		EventType:    EventTypeMemoryExtractRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		AggregateID:  strings.TrimSpace(chatPayload.ConversationID),
		Payload:      payloadJSON,
	}
	// 1. Only the eventType and message key are passed; service ownership,
	//    outbox table and Kafka topic are resolved by the repository routing layer.
	// 2. The chat-persist pipeline therefore never learns memory's physical
	//    topic, avoiding double-write drift when the service is split out.
	_, err = outboxRepo.CreateMessage(
		ctx,
		EventTypeMemoryExtractRequested,
		strings.TrimSpace(chatPayload.ConversationID),
		outboxPayload,
		maxRetry,
	)
	return err
}
// PublishMemoryExtractFromGraph publishes a memory-extract event right after a
// graph run completes.
//
// Design intent:
//  1. Bypasses the chat-persist pipeline; the agent service calls it on demand
//     after the graph finishes.
//  2. Handles source-text truncation, idempotency-key generation and the
//     memory feature switch internally.
//  3. This function itself only returns the error; the caller is expected to
//     log publish failures rather than abort the main pipeline.
func PublishMemoryExtractFromGraph(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	userID int,
	conversationID string,
	sourceText string,
) error {
	if !isMemoryWriteEnabled() {
		return nil
	}
	if publisher == nil {
		return errors.New("event publisher is nil")
	}
	// Skip silently (nil) when there is nothing meaningful to extract.
	sourceText = strings.TrimSpace(sourceText)
	if sourceText == "" || userID <= 0 || strings.TrimSpace(conversationID) == "" {
		return nil
	}
	truncated := truncateByRune(sourceText, maxMemorySourceTextLength)
	now := time.Now()
	payload := model.MemoryExtractRequestedPayload{
		UserID:         userID,
		ConversationID: strings.TrimSpace(conversationID),
		SourceRole:     "user",
		SourceText:     truncated,
		OccurredAt:     now,
		IdempotencyKey: buildMemoryExtractIdempotencyKey(userID, conversationID, truncated),
	}
	return publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    EventTypeMemoryExtractRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   payload.ConversationID,
		AggregateID:  payload.ConversationID,
		Payload:      payload,
	})
}
// buildMemoryExtractPayloadFromChat derives a memory-extract payload from a
// chat-persist payload. The second return value reports whether the message
// qualifies: only non-empty, user-authored text is enqueued.
func buildMemoryExtractPayloadFromChat(chatPayload model.ChatHistoryPersistPayload) (model.MemoryExtractRequestedPayload, bool) {
	normalizedRole := strings.ToLower(strings.TrimSpace(chatPayload.Role))
	text := strings.TrimSpace(chatPayload.Message)
	if normalizedRole != "user" || text == "" {
		return model.MemoryExtractRequestedPayload{}, false
	}
	clipped := truncateByRune(text, maxMemorySourceTextLength)
	payload := model.MemoryExtractRequestedPayload{
		UserID:         chatPayload.UserID,
		ConversationID: strings.TrimSpace(chatPayload.ConversationID),
		// Day1 keeps assistant_id/run_id empty; the main pipeline context
		// fills them in later.
		AssistantID:     "",
		RunID:           "",
		SourceMessageID: 0,
		SourceRole:      normalizedRole,
		SourceText:      clipped,
		OccurredAt:      time.Now(),
		TraceID:         "",
		IdempotencyKey:  buildMemoryExtractIdempotencyKey(chatPayload.UserID, chatPayload.ConversationID, clipped),
	}
	return payload, true
}
// validateMemoryExtractPayload rejects payloads that cannot produce a valid
// extract job: the user ID must be positive and every required string field
// must be non-empty after trimming.
func validateMemoryExtractPayload(payload model.MemoryExtractRequestedPayload) error {
	switch {
	case payload.UserID <= 0:
		return errors.New("user_id is invalid")
	case strings.TrimSpace(payload.ConversationID) == "":
		return errors.New("conversation_id is empty")
	case strings.TrimSpace(payload.SourceRole) == "":
		return errors.New("source_role is empty")
	case strings.TrimSpace(payload.SourceText) == "":
		return errors.New("source_text is empty")
	case strings.TrimSpace(payload.IdempotencyKey) == "":
		return errors.New("idempotency_key is empty")
	}
	return nil
}
// buildMemoryExtractIdempotencyKey derives a stable dedupe key from the user,
// conversation and (trimmed) source text: "memory_extract_<uid>_" followed by
// the first 8 bytes of a SHA-256 digest, hex-encoded.
func buildMemoryExtractIdempotencyKey(userID int, conversationID, sourceText string) string {
	conv := strings.TrimSpace(conversationID)
	text := strings.TrimSpace(sourceText)
	digest := sha256.Sum256([]byte(fmt.Sprintf("%d|%s|%s", userID, conv, text)))
	suffix := hex.EncodeToString(digest[:8])
	return "memory_extract_" + strconv.Itoa(userID) + "_" + suffix
}
// truncateByRune clips raw to at most max runes without splitting a
// multi-byte character. A non-positive max yields the empty string; a string
// already within the limit is returned unchanged (no allocation).
func truncateByRune(raw string, max int) string {
	if max <= 0 {
		return ""
	}
	seen := 0
	for byteIdx := range raw {
		if seen == max {
			// byteIdx is the start of rune max+1, so raw[:byteIdx] holds
			// exactly the first max runes.
			return raw[:byteIdx]
		}
		seen++
	}
	return raw
}
// isMemoryWriteEnabled reports whether the memory-write pipeline is on.
// A missing "memory.enabled" config key defaults to enabled.
func isMemoryWriteEnabled() bool {
	if viper.IsSet("memory.enabled") {
		return viper.GetBool("memory.enabled")
	}
	return true
}

View File

@@ -0,0 +1,214 @@
package eventsvc
import (
"context"
"errors"
"fmt"
"sort"
"strings"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
)
// OutboxBus is the outbox facade shared by the startup side and business side.
//
// Responsibilities:
//  1. Exposes only Publish / RegisterEventHandler / Start / Close upwards.
//  2. May internally route per service to multiple underlying engines.
//  3. Carries no business logic — only event ownership, topic/group and
//     engine selection.
type OutboxBus interface {
	outboxinfra.EventPublisher
	RegisterEventHandler(eventType string, handler outboxinfra.MessageHandler) error
	Start(ctx context.Context)
	Close()
}

// routedOutboxBus fans one facade out to per-service buses, keyed by the
// trimmed service name.
type routedOutboxBus struct {
	buses map[string]OutboxBus
}
// NewRoutedOutboxBus assembles multiple service-level outbox buses behind one
// facade.
//
//  1. It creates no underlying engines — it only accepts buses built upstream.
//  2. Publishing and handler registration are routed by event ownership to the
//     matching service bus.
//  3. A missing service bus is reported as an error at use time, never a
//     silent fallback to a shared topic.
//
// Returns nil when no usable (non-empty name, non-nil) bus was supplied.
func NewRoutedOutboxBus(buses map[string]OutboxBus) OutboxBus {
	filtered := make(map[string]OutboxBus, len(buses))
	for name, serviceBus := range buses {
		trimmed := strings.TrimSpace(name)
		if trimmed == "" || serviceBus == nil {
			continue
		}
		filtered[trimmed] = serviceBus
	}
	if len(filtered) == 0 {
		return nil
	}
	return &routedOutboxBus{buses: filtered}
}
// NewServiceOutboxBus creates the underlying outbox engine with service-level
// topic / group settings.
//
//  1. Topic / group are derived from the service name; callers no longer pass
//     a shared topic.
//  2. Returns nil (with a nil error) when kafka is disabled, so callers can
//     fall back to the synchronous path.
//  3. Registers no handlers; registration stays with the startup side.
func NewServiceOutboxBus(repo *outboxinfra.Repository, baseCfg kafkabus.Config, serviceName string) (OutboxBus, error) {
	if repo == nil {
		return nil, errors.New("outbox repository is nil")
	}
	serviceName = strings.TrimSpace(serviceName)
	if serviceName == "" {
		return nil, errors.New("serviceName is empty")
	}
	route, _ := outboxinfra.ResolveServiceRoute(serviceName)
	cfg := baseCfg
	cfg.Topic = strings.TrimSpace(route.Topic)
	cfg.GroupID = strings.TrimSpace(route.GroupID)
	cfg.ServiceName = strings.TrimSpace(route.ServiceName)
	if cfg.ServiceName == "" {
		// Fall back to the caller-provided name when the route has none.
		cfg.ServiceName = serviceName
	}
	bus, err := outboxinfra.NewEventBus(repo.WithRoute(route), cfg)
	if err != nil {
		return nil, err
	}
	if bus == nil {
		// Kafka disabled: signal "no bus" without an error.
		return nil, nil
	}
	return bus, nil
}
// Publish routes the request to the service-level bus owning its event type.
func (b *routedOutboxBus) Publish(ctx context.Context, req outboxinfra.PublishRequest) error {
	target, resolveErr := b.resolveBusByEventType(req.EventType)
	if resolveErr != nil {
		return resolveErr
	}
	return target.Publish(ctx, req)
}
// RegisterEventHandler attaches handler to the service bus owning eventType.
func (b *routedOutboxBus) RegisterEventHandler(eventType string, handler outboxinfra.MessageHandler) error {
	if handler == nil {
		return errors.New("handler is nil")
	}
	target, resolveErr := b.resolveBusByEventType(eventType)
	if resolveErr != nil {
		return resolveErr
	}
	return target.RegisterEventHandler(eventType, handler)
}
// Start launches every underlying service bus in deterministic order.
// A nil receiver is tolerated as a no-op.
func (b *routedOutboxBus) Start(ctx context.Context) {
	if b == nil {
		return
	}
	for _, name := range orderedOutboxServiceNames(b.buses) {
		b.buses[name].Start(ctx)
	}
}
// Close shuts every underlying service bus down in the same deterministic
// order used by Start. A nil receiver is tolerated as a no-op.
func (b *routedOutboxBus) Close() {
	if b == nil {
		return
	}
	for _, name := range orderedOutboxServiceNames(b.buses) {
		b.buses[name].Close()
	}
}
// resolveBusByEventType maps an event type to its owning service bus, erroring
// on an uninitialized facade, an empty event type, an unregistered route, or a
// registered route whose service bus is absent.
func (b *routedOutboxBus) resolveBusByEventType(eventType string) (OutboxBus, error) {
	if b == nil {
		return nil, errors.New("outbox bus is not initialized")
	}
	trimmed := strings.TrimSpace(eventType)
	if trimmed == "" {
		return nil, errors.New("eventType is empty")
	}
	serviceName, known := outboxinfra.ResolveEventService(trimmed)
	if !known {
		return nil, fmt.Errorf("outbox route not registered: eventType=%s", trimmed)
	}
	// A missing key yields a nil zero value, so one check covers both
	// "absent" and "explicitly nil".
	target := b.buses[strings.TrimSpace(serviceName)]
	if target == nil {
		return nil, fmt.Errorf("service outbox bus is missing: service=%s eventType=%s", serviceName, trimmed)
	}
	return target, nil
}
// orderedOutboxServiceNames yields the bus keys deterministically: known
// services first (in OutboxServiceNames order), then any extra keys sorted
// alphabetically.
func orderedOutboxServiceNames(buses map[string]OutboxBus) []string {
	result := make([]string, 0, len(buses))
	placed := make(map[string]struct{}, len(buses))
	for _, known := range OutboxServiceNames() {
		if _, ok := buses[known]; !ok {
			continue
		}
		result = append(result, known)
		placed[known] = struct{}{}
	}
	var remainder []string
	for name := range buses {
		if _, done := placed[name]; !done {
			remainder = append(remainder, name)
		}
	}
	sort.Strings(remainder)
	return append(result, remainder...)
}
// OutboxServiceNames returns the service-level outbox names enabled in the
// current phase. The slice order is the deterministic start/close order used
// by orderedOutboxServiceNames.
func OutboxServiceNames() []string {
	return []string{
		string(outboxHandlerServiceAgent),
		string(outboxHandlerServiceMemory),
	}
}
// ResolveOutboxTopicForService maps a service name to its dedicated Kafka topic.
//
//  1. Keeps the current naming scheme: smartflow.<service>.outbox.
//  2. The empty-service fallback is a safety net, not the main path.
//  3. Callers never pass a shared topic, so entry points stop depending on
//     the legacy layout.
func ResolveOutboxTopicForService(serviceName string) string {
	route, _ := outboxinfra.ResolveServiceRoute(serviceName)
	routedTopic := strings.TrimSpace(route.Topic)
	if routedTopic != "" {
		return routedTopic
	}
	trimmedService := strings.TrimSpace(serviceName)
	if trimmedService == "" {
		return kafkabus.DefaultTopic
	}
	return "smartflow." + trimmedService + ".outbox"
}
// ResolveOutboxGroupForService maps a service name to its dedicated Kafka
// consumer group, falling back to the shared default only for an empty name.
func ResolveOutboxGroupForService(serviceName string) string {
	route, _ := outboxinfra.ResolveServiceRoute(serviceName)
	routedGroup := strings.TrimSpace(route.GroupID)
	if routedGroup != "" {
		return routedGroup
	}
	trimmedService := strings.TrimSpace(serviceName)
	if trimmedService == "" {
		return kafkabus.DefaultGroup
	}
	return "smartflow-" + trimmedService + "-outbox-consumer"
}
// ResolveOutboxTopicForEvent derives the topic from the event's owning
// service, erroring when the event has no registered route.
func ResolveOutboxTopicForEvent(eventType string) (string, error) {
	route, known := outboxinfra.ResolveEventRoute(eventType)
	if !known {
		return "", fmt.Errorf("outbox route not registered: eventType=%s", strings.TrimSpace(eventType))
	}
	routedTopic := strings.TrimSpace(route.Topic)
	if routedTopic != "" {
		return routedTopic, nil
	}
	return ResolveOutboxTopicForService(route.ServiceName), nil
}

View File

@@ -0,0 +1,74 @@
package eventsvc
import (
	"errors"
	"fmt"
	"strings"

	outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
)
// outboxHandlerService names the business service that owns an outbox route.
//
// It records ownership only — not an implementation package name — so startup
// logs and the routing table stay directly readable.
type outboxHandlerService string

const (
	outboxHandlerServiceAgent           outboxHandlerService = "agent"
	outboxHandlerServiceTask            outboxHandlerService = "task"
	outboxHandlerServiceMemory          outboxHandlerService = "memory"
	outboxHandlerServiceActiveScheduler outboxHandlerService = "active-scheduler"
	outboxHandlerServiceNotification    outboxHandlerService = "notification"
)

// outboxHandlerRoute explicitly describes "event type -> owning service ->
// handler registration action".
//
//  1. EventType uniquely locates the outbox routing key.
//  2. Service marks which business service owns the route.
//  3. Register only attaches the handler to the bus; it carries no business logic.
type outboxHandlerRoute struct {
	EventType string
	Service   outboxHandlerService
	Register  func() error
}
// registerOutboxHandlerRoutes registers each handler in the route table.
//
//  1. Event types and service ownership live in the table, so the startup
//     entry point is not littered with ad-hoc error checks.
//  2. Registration runs uniformly, so a failure pinpoints the exact
//     event_type and service.
//  3. A route without a register function is a hard error — no silent gaps.
func registerOutboxHandlerRoutes(routes []outboxHandlerRoute) error {
	for _, route := range routes {
		if route.Register == nil {
			return fmt.Errorf("outbox handler route 缺少注册函数: event_type=%s service=%s", route.EventType, route.Service)
		}
		// Record ownership before registering so publish-side routing is in
		// place even if the handler step below fails.
		if err := outboxinfra.RegisterEventService(route.EventType, string(route.Service)); err != nil {
			return fmt.Errorf("登记 outbox 事件归属失败event_type=%s, service=%s: %w", route.EventType, route.Service, err)
		}
		if err := route.Register(); err != nil {
			return fmt.Errorf("注册 outbox handler 失败event_type=%s, service=%s: %w", route.EventType, route.Service, err)
		}
	}
	return nil
}
// scopedOutboxRepoForEvent narrows the generic outbox repository to the
// service-owned table of one event type.
//
// Responsibilities:
//  1. Performs only the event -> service -> table routing; it never touches
//     business write semantics.
//  2. The returned repository is only valid for this event's
//     MarkDead / ConsumeAndMarkConsumed / MarkFailedForRetry calls.
//  3. A missing route is a hard error, so nothing silently falls back to the
//     default table.
func scopedOutboxRepoForEvent(outboxRepo *outboxinfra.Repository, eventType string) (*outboxinfra.Repository, error) {
	if outboxRepo == nil {
		// errors.New for constant messages (fmt.Errorf adds nothing here).
		return nil, errors.New("outbox repository is nil")
	}
	eventType = strings.TrimSpace(eventType)
	if eventType == "" {
		return nil, errors.New("eventType is empty")
	}
	route, ok := outboxinfra.ResolveEventRoute(eventType)
	if !ok {
		return nil, fmt.Errorf("outbox route not registered: eventType=%s", eventType)
	}
	return outboxRepo.WithRoute(route), nil
}

View File

@@ -0,0 +1,150 @@
package eventsvc
import (
"context"
"encoding/json"
"errors"
"log"
"strconv"
"time"
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
"gorm.io/gorm"
)
const (
	// EventTypeTaskUrgencyPromoteRequested is the "task urgency promotion
	// requested" event type.
	//
	// Naming constraints:
	//  1. Expresses business semantics only; never leaks Kafka/outbox details.
	//  2. Kept as a stable routing key long-term; protocol evolution goes
	//     through event_version first.
	EventTypeTaskUrgencyPromoteRequested = "task.urgency.promote.requested"
)
// RegisterTaskUrgencyPromoteRoute records task event ownership only; it
// registers no consumer handler.
//
// Responsibilities:
//  1. Lets the residual monolith keep writing task events into
//     task_outbox_messages during the migration.
//  2. Creates no consumer and starts no handler — real consumption moved to
//     cmd/task.
//  3. Re-registering the same ownership is idempotent.
func RegisterTaskUrgencyPromoteRoute() error {
	return outboxinfra.RegisterEventService(EventTypeTaskUrgencyPromoteRequested, string(outboxHandlerServiceTask))
}
// RegisterTaskUrgencyPromoteHandler registers the "task urgency promotion"
// consumer handler.
//
// Responsibilities:
//  1. Only registers the handler; never starts/stops the event bus.
//  2. Handles only `task.urgency.promote.requested`, no other business events.
//  3. Uses `ConsumeAndMarkConsumed` so the business update and the outbox
//     consumed-cursor advance share one transaction.
func RegisterTaskUrgencyPromoteHandler(
	bus OutboxBus,
	outboxRepo *outboxinfra.Repository,
	repoManager *dao.RepoManager,
) error {
	// 1. Dependency checks: consuming is unsafe with any key dependency missing.
	if bus == nil {
		return errors.New("event bus is nil")
	}
	if outboxRepo == nil {
		return errors.New("outbox repository is nil")
	}
	if repoManager == nil {
		return errors.New("repo manager is nil")
	}
	eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeTaskUrgencyPromoteRequested)
	if err != nil {
		return err
	}
	// 2. Define the unified handler.
	handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
		// 2.1 Parse the payload first; a parse failure is unrecoverable, mark dead.
		var payload model.TaskUrgencyPromoteRequestedPayload
		if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析任务紧急性平移载荷失败: "+unmarshalErr.Error())
			return nil
		}
		// 2.2 Light parameter sanitizing so dirty data never reaches the DAO.
		payload.TaskIDs = sanitizePositiveUniqueIntIDs(payload.TaskIDs)
		if payload.UserID <= 0 || len(payload.TaskIDs) == 0 {
			_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "任务紧急性平移载荷无效: user_id 或 task_ids 非法")
			return nil
		}
		// 2.3 Go through the outbox consume-transaction entry so
		//     "business success -> consumed" stays atomic.
		return eventOutboxRepo.ConsumeAndMarkConsumed(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
			// 2.3.1 Build a RepoManager on the same tx, reusing the existing
			//       cross-DAO transaction pattern.
			txM := repoManager.WithTx(tx)
			// 2.3.2 Use consume time for the conditional update so the
			//       "threshold reached" decision matches the actual write moment.
			updated, err := txM.Task.PromoteTaskUrgencyByIDs(ctx, payload.UserID, payload.TaskIDs, time.Now())
			if err != nil {
				return err
			}
			log.Printf("任务紧急性平移消费完成: user_id=%d task_count=%d affected=%d outbox_id=%d", payload.UserID, len(payload.TaskIDs), updated, envelope.OutboxID)
			return nil
		})
	}
	// 3. Register the event handler.
	return bus.RegisterEventHandler(EventTypeTaskUrgencyPromoteRequested, handler)
}
// PublishTaskUrgencyPromoteRequested publishes a "task urgency promotion
// requested" event.
//
//  1. Only hands the DTO to the outbox; it never waits for consumption.
//  2. On publish failure the error is returned so the caller decides whether
//     to degrade or retry.
func PublishTaskUrgencyPromoteRequested(
	ctx context.Context,
	publisher outboxinfra.EventPublisher,
	payload model.TaskUrgencyPromoteRequestedPayload,
) error {
	if publisher == nil {
		return errors.New("event publisher is nil")
	}
	if payload.UserID <= 0 {
		return errors.New("invalid user_id")
	}
	payload.TaskIDs = sanitizePositiveUniqueIntIDs(payload.TaskIDs)
	if len(payload.TaskIDs) == 0 {
		return errors.New("task_ids is empty")
	}
	if payload.TriggeredAt.IsZero() {
		payload.TriggeredAt = time.Now()
	}
	// Keying by user_id keeps one user's promotion events on one partition,
	// which lowers the chance of reordering.
	userKey := strconv.Itoa(payload.UserID)
	return publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    EventTypeTaskUrgencyPromoteRequested,
		EventVersion: outboxinfra.DefaultEventVersion,
		MessageKey:   userKey,
		AggregateID:  userKey,
		Payload:      payload,
	})
}
// sanitizePositiveUniqueIntIDs drops non-positive values and duplicates.
//
//  1. Pure parameter hygiene — no business decisions.
//  2. The current implementation happens to keep first-occurrence order, but
//     callers must not rely on ordering (the SQL `WHERE IN` does not).
func sanitizePositiveUniqueIntIDs(ids []int) []int {
	visited := make(map[int]struct{}, len(ids))
	keep := make([]int, 0, len(ids))
	for _, candidate := range ids {
		_, dup := visited[candidate]
		if candidate > 0 && !dup {
			visited[candidate] = struct{}{}
			keep = append(keep, candidate)
		}
	}
	return keep
}

View File

@@ -0,0 +1,208 @@
package model
import (
"time"
"gorm.io/gorm"
)
const (
	// ActiveScheduleJobStatusPending: job created, waiting to be scanned once trigger_at is reached.
	ActiveScheduleJobStatusPending = "pending"
	// ActiveScheduleJobStatusTriggered: job produced a formal trigger; the trigger now drives state.
	ActiveScheduleJobStatusTriggered = "triggered"
	// ActiveScheduleJobStatusCanceled: the task finished or was canceled; the job fires no more.
	ActiveScheduleJobStatusCanceled = "canceled"
	// ActiveScheduleJobStatusSkipped: the scan found active scheduling is no longer needed.
	ActiveScheduleJobStatusSkipped = "skipped"
	// ActiveScheduleJobStatusFailed: scan or trigger write failed; the error is kept for retry/diagnosis.
	ActiveScheduleJobStatusFailed = "failed"
)

const (
	// ActiveScheduleTriggerStatusPending: the trigger signal is persisted and awaits a worker.
	ActiveScheduleTriggerStatusPending = "pending"
	// ActiveScheduleTriggerStatusProcessing: a worker is handling this trigger signal.
	ActiveScheduleTriggerStatusProcessing = "processing"
	// ActiveScheduleTriggerStatusPreviewGenerated: a queryable preview has been generated.
	ActiveScheduleTriggerStatusPreviewGenerated = "preview_generated"
	// ActiveScheduleTriggerStatusSkipped: this trigger was judged not worth processing further.
	ActiveScheduleTriggerStatusSkipped = "skipped"
	// ActiveScheduleTriggerStatusClosed: active observation concluded "closed"; no preview is generated.
	ActiveScheduleTriggerStatusClosed = "closed"
	// ActiveScheduleTriggerStatusFailed: pipeline processing failed; the error class decides retry.
	ActiveScheduleTriggerStatusFailed = "failed"
	// ActiveScheduleTriggerStatusRejected: parameter/ownership validation failed; never enters the pipeline.
	ActiveScheduleTriggerStatusRejected = "rejected"
)

const (
	// ActiveSchedulePreviewStatusPending: the preview is still assembling and must not show as confirmable.
	ActiveSchedulePreviewStatusPending = "pending"
	// ActiveSchedulePreviewStatusReady: the preview is viewable and confirmable.
	ActiveSchedulePreviewStatusReady = "ready"
	// ActiveSchedulePreviewStatusApplied: the user confirmed and the preview was applied successfully.
	ActiveSchedulePreviewStatusApplied = "applied"
	// ActiveSchedulePreviewStatusIgnored: the user explicitly ignored this suggestion.
	ActiveSchedulePreviewStatusIgnored = "ignored"
	// ActiveSchedulePreviewStatusExpired: the preview expired; confirmation is no longer allowed.
	ActiveSchedulePreviewStatusExpired = "expired"
	// ActiveSchedulePreviewStatusFailed: preview generation or write-back failed.
	ActiveSchedulePreviewStatusFailed = "failed"
)

const (
	// ActiveScheduleApplyStatusNone: no confirm-apply has been initiated yet.
	ActiveScheduleApplyStatusNone = "none"
	// ActiveScheduleApplyStatusApplying: the confirm request is being applied transactionally.
	ActiveScheduleApplyStatusApplying = "applying"
	// ActiveScheduleApplyStatusApplied: confirm-apply succeeded.
	ActiveScheduleApplyStatusApplied = "applied"
	// ActiveScheduleApplyStatusFailed: apply failed; the formal schedule must never end up half-written.
	ActiveScheduleApplyStatusFailed = "failed"
	// ActiveScheduleApplyStatusRejected: rejected by business rules such as expiry or idempotency conflicts.
	ActiveScheduleApplyStatusRejected = "rejected"
	// ActiveScheduleApplyStatusExpired: preview expiry made the apply impossible.
	ActiveScheduleApplyStatusExpired = "expired"
)

const (
	// ActiveScheduleTriggerTypeImportantUrgentTask: an important-and-urgent task reached its threshold.
	ActiveScheduleTriggerTypeImportantUrgentTask = "important_urgent_task"
	// ActiveScheduleTriggerTypeUnfinishedFeedback: the user explicitly reported a scheduled task as unfinished.
	ActiveScheduleTriggerTypeUnfinishedFeedback = "unfinished_feedback"
	// ActiveScheduleSourceWorkerDueJob: triggered by the background due-job scan.
	ActiveScheduleSourceWorkerDueJob = "worker_due_job"
	// ActiveScheduleSourceAPITrigger: a formal trigger via the test/dev API.
	ActiveScheduleSourceAPITrigger = "api_trigger"
	// ActiveScheduleSourceAPIDryRun: a test/dev API dry-run; must not publish formal events.
	ActiveScheduleSourceAPIDryRun = "api_dry_run"
	// ActiveScheduleSourceUserFeedback: triggered from the user-feedback entry point.
	ActiveScheduleSourceUserFeedback = "user_feedback"
	// ActiveScheduleTargetTypeTaskPool: target_id points at tasks.id.
	ActiveScheduleTargetTypeTaskPool = "task_pool"
	// ActiveScheduleTargetTypeScheduleEvent: target_id points at schedule_events.id.
	ActiveScheduleTargetTypeScheduleEvent = "schedule_event"
	// ActiveScheduleTargetTypeTaskItem: target_id points at task_items.id.
	ActiveScheduleTargetTypeTaskItem = "task_item"
)
// ActiveScheduleJob is the GORM model for the active-scheduling due-job table.
//
// Responsibility boundaries:
//  1. Records whether an active-scheduling trigger must be generated once a task reaches urgency_threshold_at;
//  2. Does NOT judge whether the task is still important and urgent — the worker re-reads the real task state at scan time;
//  3. Does NOT publish outbox events; it only stores the state needed for scanning and troubleshooting.
type ActiveScheduleJob struct {
	ID            string         `gorm:"column:id;type:varchar(64);primaryKey"`
	UserID        int            `gorm:"column:user_id;not null;index:idx_active_jobs_user_status_trigger,priority:1;index:idx_active_jobs_task_status,priority:1"`
	TaskID        int            `gorm:"column:task_id;not null;index:idx_active_jobs_task_status,priority:2;comment:对应 tasks.id"`
	TriggerType   string         `gorm:"column:trigger_type;type:varchar(64);not null;default:'important_urgent_task';comment:触发类型"`
	Status        string         `gorm:"column:status;type:varchar(32);not null;default:'pending';index:idx_active_jobs_user_status_trigger,priority:2;index:idx_active_jobs_task_status,priority:3;comment:pending/triggered/canceled/skipped/failed"`
	TriggerAt     time.Time      `gorm:"column:trigger_at;not null;index:idx_active_jobs_user_status_trigger,priority:3;comment:到期触发时间"`
	DedupeKey     string         `gorm:"column:dedupe_key;type:varchar(191);index:idx_active_jobs_dedupe;comment:触发去重窗口键"`
	LastTriggerID *string        `gorm:"column:last_trigger_id;type:varchar(64);index:idx_active_jobs_last_trigger;comment:最近一次生成的 trigger_id"`
	LastErrorCode *string        `gorm:"column:last_error_code;type:varchar(64);comment:最近一次扫描错误码"`
	LastError     *string        `gorm:"column:last_error;type:text;comment:最近一次扫描错误详情"`
	LastScannedAt *time.Time     `gorm:"column:last_scanned_at;comment:最近一次被 worker 扫描时间"`
	TraceID       string         `gorm:"column:trace_id;type:varchar(64);index:idx_active_jobs_trace_id"`
	CreatedAt     time.Time      `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt     time.Time      `gorm:"column:updated_at;autoUpdateTime"`
	DeletedAt     gorm.DeletedAt `gorm:"column:deleted_at;index"`
}

// TableName returns the backing table name for ActiveScheduleJob.
func (ActiveScheduleJob) TableName() string { return "active_schedule_jobs" }
// ActiveScheduleTrigger is the unified trigger-signal table model for active scheduling.
//
// Responsibility boundaries:
//  1. Persists the normalized trigger facts coming from worker / API / user feedback;
//  2. Chains the trigger -> preview -> notification -> apply audit trail;
//  3. Carries no business implementation for candidate generation, LLM selection, or notification delivery.
type ActiveScheduleTrigger struct {
	ID             string         `gorm:"column:id;type:varchar(64);primaryKey"`
	UserID         int            `gorm:"column:user_id;not null;index:idx_active_triggers_user_created,priority:1"`
	TriggerType    string         `gorm:"column:trigger_type;type:varchar(64);not null;index:idx_active_triggers_dedupe,priority:2"`
	Source         string         `gorm:"column:source;type:varchar(64);not null;comment:worker_due_job/api_trigger/api_dry_run/user_feedback"`
	TargetType     string         `gorm:"column:target_type;type:varchar(64);not null;index:idx_active_triggers_target,priority:1"`
	TargetID       int            `gorm:"column:target_id;not null;index:idx_active_triggers_target,priority:2"`
	FeedbackID     string         `gorm:"column:feedback_id;type:varchar(128);index:idx_active_triggers_feedback;comment:用户反馈来源ID可为空"`
	JobID          *string        `gorm:"column:job_id;type:varchar(64);index:idx_active_triggers_job_id"`
	IdempotencyKey string         `gorm:"column:idempotency_key;type:varchar(191);index:idx_active_triggers_idempotency;comment:API/用户反馈幂等键"`
	DedupeKey      string         `gorm:"column:dedupe_key;type:varchar(191);index:idx_active_triggers_dedupe,priority:1;comment:触发去重窗口键"`
	Status         string         `gorm:"column:status;type:varchar(32);not null;default:'pending';index:idx_active_triggers_status_updated,priority:1"`
	MockNow        *time.Time     `gorm:"column:mock_now;comment:测试触发模拟时间"`
	IsMockTime     bool           `gorm:"column:is_mock_time;not null;default:false;comment:是否使用模拟时间"`
	RequestedAt    time.Time      `gorm:"column:requested_at;not null;comment:触发请求时间"`
	PayloadJSON    *string        `gorm:"column:payload_json;type:json;comment:触发来源补充信息"`
	PreviewID      *string        `gorm:"column:preview_id;type:varchar(64);index:idx_active_triggers_preview_id"`
	LastErrorCode  *string        `gorm:"column:last_error_code;type:varchar(64);comment:链路错误码"`
	LastError      *string        `gorm:"column:last_error;type:text;comment:链路错误详情"`
	ProcessedAt    *time.Time     `gorm:"column:processed_at;comment:worker 开始处理时间"`
	CompletedAt    *time.Time     `gorm:"column:completed_at;comment:本触发进入终态时间"`
	TraceID        string         `gorm:"column:trace_id;type:varchar(64);index:idx_active_triggers_trace_id"`
	CreatedAt      time.Time      `gorm:"column:created_at;autoCreateTime;index:idx_active_triggers_user_created,priority:2"`
	UpdatedAt      time.Time      `gorm:"column:updated_at;autoUpdateTime;index:idx_active_triggers_status_updated,priority:2"`
	DeletedAt      gorm.DeletedAt `gorm:"column:deleted_at;index"`
}

// TableName returns the backing table name for ActiveScheduleTrigger.
func (ActiveScheduleTrigger) TableName() string { return "active_schedule_triggers" }
// ActiveSchedulePreview is the confirmable-preview table model for active scheduling.
//
// Responsibility boundaries:
//  1. Stores generated candidates, explanations, before/after summaries, and the expiry time;
//  2. Stores the lightweight state of a single confirm-and-apply, avoiding a separate apply-request table;
//  3. Does NOT write the official schedule; that is still done by the later apply/service port.
type ActiveSchedulePreview struct {
	ID                    string         `gorm:"column:preview_id;type:varchar(64);primaryKey;uniqueIndex:uk_active_previews_apply_idempotency,priority:1"`
	UserID                int            `gorm:"column:user_id;not null;index:idx_active_previews_user_created_at,priority:1"`
	TriggerID             string         `gorm:"column:trigger_id;type:varchar(64);not null;index:idx_active_previews_trigger_id"`
	TriggerType           string         `gorm:"column:trigger_type;type:varchar(64);not null"`
	TargetType            string         `gorm:"column:target_type;type:varchar(64);not null"`
	TargetID              int            `gorm:"column:target_id;not null"`
	Status                string         `gorm:"column:status;type:varchar(32);not null;default:'pending';comment:pending/ready/applied/ignored/expired/failed"`
	SelectedCandidateID   string         `gorm:"column:selected_candidate_id;type:varchar(64);comment:LLM 或后端 fallback 选中的候选ID"`
	CandidateCount        int            `gorm:"column:candidate_count;not null;default:0"`
	SelectedCandidateJSON *string        `gorm:"column:selected_candidate_json;type:json"`
	CandidatesJSON        *string        `gorm:"column:candidates_json;type:json"`
	DecisionJSON          *string        `gorm:"column:decision_json;type:json"`
	MetricsJSON           *string        `gorm:"column:metrics_json;type:json"`
	IssuesJSON            *string        `gorm:"column:issues_json;type:json"`
	ContextSummaryJSON    *string        `gorm:"column:context_summary_json;type:json"`
	BeforeSummaryJSON     *string        `gorm:"column:before_summary_json;type:json"`
	PreviewChangesJSON    *string        `gorm:"column:preview_changes_json;type:json"`
	AfterSummaryJSON      *string        `gorm:"column:after_summary_json;type:json"`
	RiskJSON              *string        `gorm:"column:risk_json;type:json"`
	ExplanationText       string         `gorm:"column:explanation_text;type:text"`
	NotificationSummary   string         `gorm:"column:notification_summary;type:text"`
	BaseVersion           string         `gorm:"column:base_version;type:varchar(128);not null;comment:确认前重校验基准版本"`
	ExpiresAt             time.Time      `gorm:"column:expires_at;not null;index:idx_active_previews_expires_at"`
	GeneratedAt           time.Time      `gorm:"column:generated_at;not null"`
	ApplyID               *string        `gorm:"column:apply_id;type:varchar(64);index:idx_active_previews_apply_id"`
	ApplyStatus           string         `gorm:"column:apply_status;type:varchar(32);not null;default:'none';comment:none/applying/applied/failed/rejected/expired"`
	ApplyCandidateID      string         `gorm:"column:apply_candidate_id;type:varchar(64)"`
	ApplyIdempotencyKey   string         `gorm:"column:apply_idempotency_key;type:varchar(191);uniqueIndex:uk_active_previews_apply_idempotency,priority:2"`
	ApplyRequestHash      string         `gorm:"column:apply_request_hash;type:varchar(128);comment:确认请求体摘要"`
	AppliedChangesJSON    *string        `gorm:"column:applied_changes_json;type:json"`
	AppliedEventIDsJSON   *string        `gorm:"column:applied_event_ids_json;type:json"`
	ApplyError            *string        `gorm:"column:apply_error;type:text"`
	AppliedAt             *time.Time     `gorm:"column:applied_at"`
	TraceID               string         `gorm:"column:trace_id;type:varchar(64);index:idx_active_previews_trace_id"`
	CreatedAt             time.Time      `gorm:"column:created_at;autoCreateTime;index:idx_active_previews_user_created_at,priority:2"`
	UpdatedAt             time.Time      `gorm:"column:updated_at;autoUpdateTime"`
	DeletedAt             gorm.DeletedAt `gorm:"column:deleted_at;index"`
}

// TableName returns the backing table name for ActiveSchedulePreview.
func (ActiveSchedulePreview) TableName() string { return "active_schedule_previews" }

View File

@@ -0,0 +1,82 @@
package model
import "time"
const (
	// ActiveScheduleSessionStatusWaitingUserReply: the session is waiting for the user to supply missing info; the backend should intercept normal chat.
	ActiveScheduleSessionStatusWaitingUserReply = "waiting_user_reply"
	// ActiveScheduleSessionStatusRerunning: the user replied and the active-scheduling graph is re-running; the backend still intercepts normal chat.
	ActiveScheduleSessionStatusRerunning = "rerunning"
	// ActiveScheduleSessionStatusReadyPreview: a displayable preview exists; the session may be released back to normal chat.
	ActiveScheduleSessionStatusReadyPreview = "ready_preview"
	// ActiveScheduleSessionStatusApplied: the user confirmed and applied; the active-scheduling session has been closed out.
	ActiveScheduleSessionStatusApplied = "applied"
	// ActiveScheduleSessionStatusIgnored: the user explicitly ignored this suggestion.
	ActiveScheduleSessionStatusIgnored = "ignored"
	// ActiveScheduleSessionStatusExpired: the session expired and no longer owns chat routing.
	ActiveScheduleSessionStatusExpired = "expired"
	// ActiveScheduleSessionStatusFailed: the session failed during binding, re-run, or write-back.
	ActiveScheduleSessionStatusFailed = "failed"
)

// ActiveScheduleSession is the persistence model of the "active-scheduling
// session routing bridge".
//
// Responsibility boundaries:
//  1. Stores only session-level routing ownership and lightweight state, never the preview table's full business content;
//  2. conversation_id may be empty before notification; it is filled in once the in-app conversation is bound;
//  3. state_json keeps lightweight state only, so heavy objects and message history stay out of the session table.
type ActiveScheduleSession struct {
	SessionID string `gorm:"column:session_id;type:varchar(64);primaryKey"`
	// 1. user_id + conversation_id locate the currently governing active-scheduling session at the chat entry point.
	// 2. conversation_id may be absent, hence the nullable column: a session can be created first and bound to a conversation later.
	UserID         int     `gorm:"column:user_id;not null;index:idx_active_schedule_sessions_user_conv,priority:1;index:idx_active_schedule_sessions_user_status_updated,priority:1"`
	ConversationID *string `gorm:"column:conversation_id;type:varchar(128);index:idx_active_schedule_sessions_user_conv,priority:2;index:idx_active_schedule_sessions_conversation_status_updated,priority:1"`
	// 3. trigger_id / current_preview_id chain the trigger source and the current preview for later auditing and replay.
	TriggerID        string    `gorm:"column:trigger_id;type:varchar(64);not null;index:idx_active_schedule_sessions_trigger_id"`
	CurrentPreviewID *string   `gorm:"column:current_preview_id;type:varchar(64);index:idx_active_schedule_sessions_preview_id"`
	Status           string    `gorm:"column:status;type:varchar(32);not null;default:'waiting_user_reply';index:idx_active_schedule_sessions_user_status_updated,priority:2;index:idx_active_schedule_sessions_status_updated,priority:1;index:idx_active_schedule_sessions_conversation_status_updated,priority:2"`
	StateJSON        string    `gorm:"column:state_json;type:json;not null"`
	CreatedAt        time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt        time.Time `gorm:"column:updated_at;autoUpdateTime;index:idx_active_schedule_sessions_user_status_updated,priority:3;index:idx_active_schedule_sessions_status_updated,priority:2;index:idx_active_schedule_sessions_conversation_status_updated,priority:3"`
}

// TableName returns the active-scheduling session table name.
func (ActiveScheduleSession) TableName() string {
	return "active_schedule_sessions"
}

// ActiveScheduleSessionState is the lightweight state stored in the session
// table's state_json column.
//
// Responsibility boundaries:
//  1. Holds only the few fields the routing and info-completion loop needs;
//  2. Carries no full preview, message history, or bulky tool results;
//  3. Lets cache and DAO reuse the same JSON semantics directly.
type ActiveScheduleSessionState struct {
	PendingQuestion    string     `json:"pending_question,omitempty"`
	MissingInfo        []string   `json:"missing_info,omitempty"`
	LastCandidateID    string     `json:"last_candidate_id,omitempty"`
	LastNotificationID string     `json:"last_notification_id,omitempty"`
	ExpiresAt          *time.Time `json:"expires_at,omitempty"`
	FailedReason       string     `json:"failed_reason,omitempty"`
}

// ActiveScheduleSessionSnapshot is the session snapshot DTO shared by the
// service, DAO, and cache layers.
//
// Responsibility boundaries:
//  1. Moves strongly-typed session state between the three layers;
//  2. Makes no business decisions and no interception judgments;
//  3. The DAO splits it into DB columns plus state_json, while the cache stores it as JSON verbatim.
type ActiveScheduleSessionSnapshot struct {
	SessionID        string
	UserID           int
	ConversationID   string
	TriggerID        string
	CurrentPreviewID string
	Status           string
	State            ActiveScheduleSessionState
	CreatedAt        time.Time
	UpdatedAt        time.Time
}

View File

@@ -0,0 +1,321 @@
package model
import (
"encoding/json"
"fmt"
"strings"
"time"
)
// AgentResumeType identifies which kind of suspended interaction this turn wants to resume.
type AgentResumeType string

const (
	AgentResumeTypeAskUser           AgentResumeType = "ask_user"
	AgentResumeTypeConfirm           AgentResumeType = "confirm"
	AgentResumeTypeConnectionRecover AgentResumeType = "connection_recover"
)

// AgentResumeAction identifies the action carried by this resume request.
type AgentResumeAction string

const (
	AgentResumeActionReply   AgentResumeAction = "reply"
	AgentResumeActionApprove AgentResumeAction = "approve"
	AgentResumeActionReject  AgentResumeAction = "reject"
	AgentResumeActionCancel  AgentResumeAction = "cancel"
	AgentResumeActionResume  AgentResumeAction = "resume"
)

// AgentResumeRequest is the unified structure under extra.resume.
//
// Design goals:
//  1. Keep reusing the existing chat endpoint instead of adding a dedicated confirm-only API;
//  2. The frontend only declares "which interaction to resume, with which action"; it never mutates backend state directly;
//  3. Before entering the main chat pipeline the backend reads this struct and picks the confirm / ask_user / connection_recover recovery path.
//
// Recommended frontend request shape:
//
//	{
//	  "message": "",
//	  "extra": {
//	    "resume": {
//	      "interaction_id": "xxx",
//	      "type": "confirm",
//	      "action": "approve"
//	    }
//	  }
//	}
//
// TODO(agent/api): call req.ResumeRequest() before the main chat flow; when the resume protocol matches, do not process the turn as normal chat.
type AgentResumeRequest struct {
	InteractionID string            `json:"interaction_id"`
	Type          AgentResumeType   `json:"type,omitempty"`
	Action        AgentResumeAction `json:"action"`
}

// UserSendMessageRequest is the chat request body; Extra carries optional
// protocol extensions such as extra.resume.
type UserSendMessageRequest struct {
	ConversationID string         `json:"conversation_id,omitempty"`
	Message        string         `json:"message" binding:"required"`
	Model          string         `json:"model,omitempty"`
	Thinking       string         `json:"thinking,omitempty"`
	Extra          map[string]any `json:"extra,omitempty"`
}
// ResumeRequest parses the structured resume request out of extra.resume.
//
// Steps:
//  1. When Extra or extra.resume is absent, return nil to mark the turn as a normal chat request;
//  2. Any map/struct shape is round-tripped through JSON into the typed struct, so the entry layer never hand-writes type assertions;
//  3. On success it runs Normalize first, then the minimal Validate, so downstream code never receives a dirty protocol;
//  4. This method only parses and sanity-checks the protocol; it never restores state and never touches Redis/MySQL.
func (r *UserSendMessageRequest) ResumeRequest() (*AgentResumeRequest, error) {
	if r == nil || len(r.Extra) == 0 {
		return nil, nil
	}
	raw, exists := r.Extra["resume"]
	if !exists || raw == nil {
		return nil, nil
	}
	encoded, marshalErr := json.Marshal(raw)
	if marshalErr != nil {
		return nil, fmt.Errorf("序列化 extra.resume 失败: %w", marshalErr)
	}
	parsed := &AgentResumeRequest{}
	if decodeErr := json.Unmarshal(encoded, parsed); decodeErr != nil {
		return nil, fmt.Errorf("解析 extra.resume 失败: %w", decodeErr)
	}
	parsed.Normalize()
	if validateErr := parsed.Validate(); validateErr != nil {
		return nil, validateErr
	}
	return parsed, nil
}
// Normalize trims surrounding whitespace from every string field of the resume protocol.
func (r *AgentResumeRequest) Normalize() {
	if r == nil {
		return
	}
	trim := strings.TrimSpace
	r.InteractionID = trim(r.InteractionID)
	r.Type = AgentResumeType(trim(string(r.Type)))
	r.Action = AgentResumeAction(trim(string(r.Action)))
}
// Validate checks the minimal legality of the resume protocol.
//
// Responsibility boundaries:
//  1. Only checks whether this "looks like" a legal resume protocol; it never verifies that interaction_id actually exists;
//  2. confirm / ask_user / connection_recover share one entry but allow different action sets, so the branch split is validated explicitly;
//  3. For ask_user replies the actual answer text should still go in the top-level message; no extra answer field is required here.
func (r *AgentResumeRequest) Validate() error {
	if r == nil {
		return nil
	}
	if r.InteractionID == "" {
		return fmt.Errorf("extra.resume.interaction_id 不能为空")
	}
	if r.Action == "" {
		return fmt.Errorf("extra.resume.action 不能为空")
	}
	// Resolve the branch label plus its allowed action set, then check membership once.
	var (
		branch  string
		allowed []AgentResumeAction
	)
	switch r.Type {
	case "", AgentResumeTypeConfirm:
		branch = "confirm"
		allowed = []AgentResumeAction{AgentResumeActionApprove, AgentResumeActionReject, AgentResumeActionCancel}
	case AgentResumeTypeAskUser:
		branch = "ask_user"
		allowed = []AgentResumeAction{AgentResumeActionReply, AgentResumeActionCancel}
	case AgentResumeTypeConnectionRecover:
		branch = "connection_recover"
		allowed = []AgentResumeAction{AgentResumeActionResume, AgentResumeActionCancel}
	default:
		return fmt.Errorf("extra.resume.type 非法: %s", r.Type)
	}
	for _, action := range allowed {
		if r.Action == action {
			return nil
		}
	}
	return fmt.Errorf("%s 恢复动作非法: %s", branch, r.Action)
}
// IsConfirmResume reports whether this resume request belongs to the confirm
// branch; an empty type defaults to confirm.
func (r *AgentResumeRequest) IsConfirmResume() bool {
	if r == nil {
		return false
	}
	switch r.Type {
	case "", AgentResumeTypeConfirm:
		return true
	}
	return false
}

// IsAskUserResume reports whether this resume request belongs to the ask_user branch.
func (r *AgentResumeRequest) IsAskUserResume() bool {
	return r != nil && r.Type == AgentResumeTypeAskUser
}

// IsConnectionRecoverResume reports whether this resume request belongs to the connection_recover branch.
func (r *AgentResumeRequest) IsConnectionRecoverResume() bool {
	return r != nil && r.Type == AgentResumeTypeConnectionRecover
}
// ChatHistoryPersistPayload is the outbox payload for persisting one chat message row.
type ChatHistoryPersistPayload struct {
	UserID                   int    `json:"user_id"`
	ConversationID           string `json:"conversation_id"`
	Role                     string `json:"role"`
	Message                  string `json:"message"`
	ReasoningContent         string `json:"reasoning_content,omitempty"`
	ReasoningDurationSeconds int    `json:"reasoning_duration_seconds,omitempty"`
	TokensConsumed           int    `json:"tokens_consumed"`
}

// ChatTokenUsageAdjustPayload is the outbox payload for adjusting a conversation's token usage by a delta.
type ChatTokenUsageAdjustPayload struct {
	UserID         int       `json:"user_id"`
	ConversationID string    `json:"conversation_id"`
	TokensDelta    int       `json:"tokens_delta"`
	Reason         string    `json:"reason"`
	TriggeredAt    time.Time `json:"triggered_at"`
}

// GetConversationMetaResponse describes one conversation's metadata for the meta endpoint.
type GetConversationMetaResponse struct {
	ConversationID string     `json:"conversation_id"`
	Title          string     `json:"title"`
	HasTitle       bool       `json:"has_title"`
	MessageCount   int        `json:"message_count"`
	LastMessageAt  *time.Time `json:"last_message_at,omitempty"`
	Status         string     `json:"status"`
}

// GetConversationListItem is a single entry in the conversation list response.
type GetConversationListItem struct {
	ConversationID string     `json:"conversation_id"`
	Title          string     `json:"title"`
	HasTitle       bool       `json:"has_title"`
	MessageCount   int        `json:"message_count"`
	LastMessageAt  *time.Time `json:"last_message_at,omitempty"`
	Status         string     `json:"status"`
	CreatedAt      *time.Time `json:"created_at,omitempty"`
}

// GetConversationListResponse is the paginated conversation list response.
type GetConversationListResponse struct {
	List     []GetConversationListItem `json:"list"`
	Page     int                       `json:"page"`
	PageSize int                       `json:"page_size"`
	Limit    int                       `json:"limit"`
	Total    int64                     `json:"total"`
	HasMore  bool                      `json:"has_more"`
}

// SchedulePlanPreviewCache is the cached schedule-plan preview keyed by user and conversation.
type SchedulePlanPreviewCache struct {
	UserID         int                   `json:"user_id"`
	ConversationID string                `json:"conversation_id"`
	TraceID        string                `json:"trace_id,omitempty"`
	Summary        string                `json:"summary"`
	CandidatePlans []UserWeekSchedule    `json:"candidate_plans"`
	TaskClassIDs   []int                 `json:"task_class_ids,omitempty"`
	HybridEntries  []HybridScheduleEntry `json:"hybrid_entries,omitempty"`
	AllocatedItems []TaskClassItem       `json:"allocated_items,omitempty"`
	GeneratedAt    time.Time             `json:"generated_at"`
}

// GetSchedulePlanPreviewResponse is the API response carrying a cached schedule-plan preview.
type GetSchedulePlanPreviewResponse struct {
	ConversationID string                `json:"conversation_id"`
	TraceID        string                `json:"trace_id,omitempty"`
	Summary        string                `json:"summary"`
	CandidatePlans []UserWeekSchedule    `json:"candidate_plans"`
	HybridEntries  []HybridScheduleEntry `json:"hybrid_entries,omitempty"`
	TaskClassIDs   []int                 `json:"task_class_ids,omitempty"`
	GeneratedAt    time.Time             `json:"generated_at"`
}

// SSEResponse is one server-sent-event envelope streamed to the frontend.
type SSEResponse struct {
	Event string         `json:"event"`
	ID    int            `json:"id,omitempty"`
	Retry int64          `json:"retry,omitempty"`
	Data  SSEMessageData `json:"data"`
}

// SSEMessageData is the data body of an SSE envelope.
type SSEMessageData struct {
	Step    int    `json:"step,omitempty"`
	Message string `json:"message,omitempty"`
}
// AgentChat is the GORM model of one agent conversation, aggregating message
// counts, token totals, and context-compaction bookkeeping.
type AgentChat struct {
	ID                     int64      `gorm:"column:id;primaryKey;autoIncrement;comment:自增ID"`
	ChatID                 string     `gorm:"column:chat_id;type:varchar(36);not null;uniqueIndex:uk_chat_id;comment:会话UUID"`
	UserID                 int        `gorm:"column:user_id;not null;index:idx_user_last,priority:1;index:idx_user_status,priority:1;comment:所属用户ID"`
	Title                  *string    `gorm:"column:title;type:varchar(255);comment:会话标题"`
	SystemPrompt           *string    `gorm:"column:system_prompt;type:text;comment:系统提示词"`
	Model                  *string    `gorm:"column:model;type:varchar(100);comment:模型标识"`
	MessageCount           int        `gorm:"column:message_count;not null;default:0;comment:消息总数"`
	TokensTotal            int        `gorm:"column:tokens_total;not null;default:0;comment:累计Token"`
	LastHistoryEventID     *string    `gorm:"column:last_history_event_id;type:varchar(64);comment:最后一次聊天历史持久化事件ID"`
	LastTokenAdjustEventID *string    `gorm:"column:last_token_adjust_event_id;type:varchar(64);comment:最后一次会话token调整事件ID"`
	LastMessageAt          *time.Time `gorm:"column:last_message_at;comment:最后消息时间"`
	Status                 string     `gorm:"column:status;type:varchar(32);not null;default:active;index:idx_user_status,priority:2;comment:会话状态"`
	CompactionSummary      *string    `gorm:"column:compaction_summary;type:text;comment:历史上下文压缩摘要"`
	CompactionWatermark    int        `gorm:"column:compaction_watermark;not null;default:0;comment:压缩水位线最后被压缩的消息ID"`
	ContextTokenStats      *string    `gorm:"column:context_token_stats;type:json;comment:上下文窗口实时token分布"`
	CreatedAt              *time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt              *time.Time `gorm:"column:updated_at;autoUpdateTime"`
	DeletedAt              *time.Time `gorm:"column:deleted_at;comment:软删除时间"`
}

// TableName returns the backing table name for AgentChat.
func (AgentChat) TableName() string { return "agent_chats" }

// ChatHistory is the GORM model of one persisted chat message, including
// reasoning text and the retry_* lineage columns (retry itself is retired;
// the columns remain only as historical data).
type ChatHistory struct {
	ID                          int       `gorm:"column:id;primaryKey;autoIncrement"`
	ChatID                      string    `gorm:"column:chat_id;type:varchar(36);not null;index:idx_user_chat,priority:2;index:idx_chat_id;comment:会话UUID"`
	UserID                      int       `gorm:"column:user_id;not null;index:idx_user_chat,priority:1"`
	SourceEventID               *string   `gorm:"column:source_event_id;type:varchar(64);uniqueIndex:uk_chat_history_source_event;comment:来源事件ID"`
	MessageContent              *string   `gorm:"column:message_content;type:text;comment:消息内容"`
	ReasoningContent            *string   `gorm:"column:reasoning_content;type:text;comment:deep reasoning text"`
	ReasoningDurationSeconds    int       `gorm:"column:reasoning_duration_seconds;not null;default:0;comment:deep reasoning duration seconds"`
	RetryGroupID                *string   `gorm:"column:retry_group_id;type:varchar(64);index:idx_retry_group;comment:retry group id"`
	RetryIndex                  *int      `gorm:"column:retry_index;comment:retry page index"`
	RetryFromUserMessageID      *int      `gorm:"column:retry_from_user_message_id;comment:source user message id"`
	RetryFromAssistantMessageID *int      `gorm:"column:retry_from_assistant_message_id;comment:source assistant message id"`
	Role                        *string   `gorm:"column:role;type:varchar(32);comment:消息角色"`
	TokensConsumed              int       `gorm:"column:tokens_consumed;not null;default:0;comment:本轮消耗Token"`
	CreatedAt                   *time.Time `gorm:"column:created_at;autoCreateTime"`
	Chat                        AgentChat `gorm:"foreignKey:ChatID;references:ChatID;constraint:OnUpdate:CASCADE,OnDelete:CASCADE"`
}

// TableName returns the backing table name for ChatHistory.
func (ChatHistory) TableName() string { return "chat_histories" }
// SaveScheduleStatePlacedItem describes the absolute-time position of one
// placed task_item. The format is unified with apply-batch's
// SingleTaskClassItem so both frontend buttons share the same data shape.
type SaveScheduleStatePlacedItem struct {
	TaskItemID         int `json:"task_item_id" binding:"required"`
	Week               int `json:"week" binding:"required,min=1"`
	DayOfWeek          int `json:"day_of_week" binding:"required,min=1,max=7"`
	StartSection       int `json:"start_section" binding:"required,min=1"`
	EndSection         int `json:"end_section" binding:"required,min=1,gtefield=StartSection"`
	EmbedCourseEventID int `json:"embed_course_event_id"`
}

// SaveScheduleStateRequest is the request body for the frontend to stash
// schedule adjustments.
//
// Responsibility boundaries:
//  1. Carries only conversation_id plus the placed task_item list (absolute-time format);
//  2. The backend converts absolute coordinates into ScheduleState's internal relative day_index;
//  3. Courses with source=event are naturally filtered out and unaffected.
type SaveScheduleStateRequest struct {
	ConversationID string                        `json:"conversation_id" binding:"required"`
	Items          []SaveScheduleStatePlacedItem `json:"items" binding:"required,dive,required"`
}

View File

@@ -0,0 +1,85 @@
package model
import "time"
const (
	// SchedulePlanStateVersionV1 is the current schedule_plan snapshot schema version.
	//
	// Design notes:
	//  1. The version number distinguishes deserialization logic when future snapshot fields change incompatibly;
	//  2. It is fixed at 1 for now; the writer side bumps it on upgrades;
	//  3. Readers can use it for compatibility fallbacks so historical snapshots never become invalid outright.
	SchedulePlanStateVersionV1 = 1
)

// AgentScheduleState is the persistence model for the per-user, per-conversation
// smart-scheduling state snapshot.
//
// Responsibility boundaries:
//  1. Stores the "recoverable in-progress state and final preview" used to carry fine-tuning across turns;
//  2. Carries the structured JSON snapshot (task classes, hybrid entries, candidate plans, etc.);
//  3. Does NOT persist the official schedule (that still goes through the existing confirm/apply chain);
//  4. Does NOT go through the message bus (the snapshot must be strongly real-time readable, so it is written to MySQL directly).
type AgentScheduleState struct {
	ID int64 `gorm:"column:id;primaryKey;autoIncrement"`
	// 1. One state per conversation: the same user_id + conversation_id always keeps only the newest snapshot.
	// 2. revision increments on upsert, making "how many times this conversation was overwritten" easy to trace.
	UserID         int    `gorm:"column:user_id;not null;uniqueIndex:uk_schedule_state_user_conv,priority:1;index:idx_schedule_state_user_updated,priority:1"`
	ConversationID string `gorm:"column:conversation_id;type:varchar(36);not null;uniqueIndex:uk_schedule_state_user_conv,priority:2"`
	Revision       int    `gorm:"column:revision;not null;default:1"`
	StateVersion   int    `gorm:"column:state_version;not null;default:1"`
	// 3. To avoid tight cross-layer struct coupling, complex slices are stored serialized as JSON strings.
	TaskClassIDsJSON   string `gorm:"column:task_class_ids;type:json;not null"`
	ConstraintsJSON    string `gorm:"column:constraints;type:json;not null"`
	HybridEntriesJSON  string `gorm:"column:hybrid_entries;type:json;not null"`
	AllocatedItemsJSON string `gorm:"column:allocated_items;type:json;not null"`
	CandidatePlansJSON string `gorm:"column:candidate_plans;type:json;not null"`
	// 4. These fields restore this round's strategy semantics, supporting continuous in-conversation fine-tuning.
	UserIntent       string `gorm:"column:user_intent;type:text"`
	Strategy         string `gorm:"column:strategy;type:varchar(32);not null;default:steady"`
	AdjustmentScope  string `gorm:"column:adjustment_scope;type:varchar(16);not null;default:large"`
	RestartRequested bool   `gorm:"column:restart_requested;not null;default:false"`
	// 5. These fields serve preview display and pipeline troubleshooting.
	FinalSummary string    `gorm:"column:final_summary;type:text"`
	Completed    bool      `gorm:"column:completed;not null;default:false"`
	TraceID      string    `gorm:"column:trace_id;type:varchar(64);index:idx_schedule_state_trace_id"`
	CreatedAt    time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt    time.Time `gorm:"column:updated_at;autoUpdateTime;index:idx_schedule_state_user_updated,priority:2"`
}

// TableName returns the backing table name for AgentScheduleState.
func (AgentScheduleState) TableName() string {
	return "agent_schedule_states"
}

// SchedulePlanStateSnapshot is the snapshot transfer struct (DTO) between the
// service layer and the DAO.
//
// Responsibility boundaries:
//  1. Moves the strongly-typed snapshot between service and dao;
//  2. The DAO serializes/deserializes it to and from the database JSON columns;
//  3. Carries no runtime-only fields (concurrency signals, channels, context objects, etc.).
type SchedulePlanStateSnapshot struct {
	UserID           int
	ConversationID   string
	Revision         int
	StateVersion     int
	TaskClassIDs     []int
	Constraints      []string
	HybridEntries    []HybridScheduleEntry
	AllocatedItems   []TaskClassItem
	CandidatePlans   []UserWeekSchedule
	UserIntent       string
	Strategy         string
	AdjustmentScope  string
	RestartRequested bool
	FinalSummary     string
	Completed        bool
	TraceID          string
	UpdatedAt        time.Time
}

View File

@@ -0,0 +1,24 @@
package model
import "time"
// AgentStateSnapshotRecord is the MySQL persistence model for agent runtime
// snapshots.
//
// Design notes:
//  1. Written asynchronously through the outbox; after the Redis snapshot expires it can still be recovered from this table;
//  2. Indexed by conversation_id to support looking up the latest snapshot per conversation;
//  3. The phase column eases filtering and cleanup by stage;
//  4. No history versioning (overwrite semantics): one conversation keeps only its latest snapshot.
type AgentStateSnapshotRecord struct {
	ID             int64     `gorm:"column:id;primaryKey;autoIncrement"`
	ConversationID string    `gorm:"column:conversation_id;type:varchar(128);not null;uniqueIndex:idx_conversation_snapshot"`
	UserID         int       `gorm:"column:user_id;not null;index:idx_user_snapshot"`
	Phase          string    `gorm:"column:phase;type:varchar(32);not null"`
	SnapshotJSON   string    `gorm:"column:snapshot_json;type:longtext;not null"`
	CreatedAt      time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt      time.Time `gorm:"column:updated_at;autoUpdateTime"`
}

// TableName returns the backing table name for AgentStateSnapshotRecord.
func (AgentStateSnapshotRecord) TableName() string {
	return "agent_state_snapshot_records"
}

View File

@@ -0,0 +1,147 @@
package model
import (
"strings"
"time"
)
// AgentTimelineKind values classify conversation timeline events.
//
// Notes:
//  1. These kinds face frontend rendering, so their semantics must stay stable and not track internal node implementation details;
//  2. Text messages and card events share one timeline; the frontend renders strictly in seq order;
//  3. Token accounting remains authoritative in chat_histories / agent_chats; the timeline only carries display order and structure.
const (
	AgentTimelineKindUserText          = "user_text"
	AgentTimelineKindAssistantText    = "assistant_text"
	AgentTimelineKindToolCall          = "tool_call"
	AgentTimelineKindToolResult        = "tool_result"
	AgentTimelineKindConfirmRequest    = "confirm_request"
	AgentTimelineKindBusinessCard      = "business_card"
	AgentTimelineKindScheduleCompleted = "schedule_completed"
	AgentTimelineKindThinkingSummary   = "thinking_summary"
)

// AgentTimelineEvent is the unified persistence record for "displayable
// events" inside a conversation.
//
// Responsibility boundaries:
//  1. Carries only "order + display info"; it never replaces chat_histories as the message ledger;
//  2. seq is a monotonically increasing order number within one conversation, used to rebuild display order after refresh;
//  3. payload stores only the structured info the frontend needs to render — never the full runtime snapshot.
type AgentTimelineEvent struct {
	ID             int64      `gorm:"column:id;primaryKey;autoIncrement"`
	UserID         int        `gorm:"column:user_id;not null;uniqueIndex:uk_timeline_user_chat_seq,priority:1;index:idx_timeline_user_chat_created,priority:1;comment:所属用户ID"`
	ChatID         string     `gorm:"column:chat_id;type:varchar(36);not null;uniqueIndex:uk_timeline_user_chat_seq,priority:2;index:idx_timeline_user_chat_created,priority:2;comment:会话UUID"`
	Seq            int64      `gorm:"column:seq;not null;uniqueIndex:uk_timeline_user_chat_seq,priority:3;comment:会话内顺序号"`
	Kind           string     `gorm:"column:kind;type:varchar(64);not null;comment:事件类型"`
	Role           *string    `gorm:"column:role;type:varchar(32);comment:消息角色"`
	Content        *string    `gorm:"column:content;type:text;comment:正文内容"`
	Payload        *string    `gorm:"column:payload;type:json;comment:结构化负载"`
	TokensConsumed int        `gorm:"column:tokens_consumed;not null;default:0;comment:该事件关联 token默认 0"`
	CreatedAt      *time.Time `gorm:"column:created_at;autoCreateTime;index:idx_timeline_user_chat_created,priority:3"`
}

// TableName returns the backing table name for AgentTimelineEvent.
func (AgentTimelineEvent) TableName() string { return "agent_timeline_events" }
// ChatTimelinePersistPayload defines the input for persisting one timeline event.
//
// Responsibility boundaries:
//  1. Expresses only the minimal field set for a single write into agent_timeline_events;
//  2. Content serves plain-text events; structured events rely mostly on PayloadJSON;
//  3. For thinking_summary events PayloadJSON should keep only detail_summary plus the necessary metadata.
type ChatTimelinePersistPayload struct {
	UserID         int    `json:"user_id"`
	ConversationID string `json:"conversation_id"`
	Seq            int64  `json:"seq"`
	Kind           string `json:"kind"`
	Role           string `json:"role,omitempty"`
	Content        string `json:"content,omitempty"`
	PayloadJSON    string `json:"payload_json,omitempty"`
	TokensConsumed int    `json:"tokens_consumed"`
}

// Normalize converges the payload to its canonical persistence form.
//
// Responsibility boundaries:
//  1. Only trims string fields and clamps negative counters to zero;
//  2. Performs no thinking_summary business trimming;
//  3. Works on a copy, so the caller's value is never mutated.
func (p ChatTimelinePersistPayload) Normalize() ChatTimelinePersistPayload {
	out := p
	for _, field := range []*string{&out.ConversationID, &out.Kind, &out.Role, &out.Content, &out.PayloadJSON} {
		*field = strings.TrimSpace(*field)
	}
	if out.Seq < 0 {
		out.Seq = 0
	}
	if out.TokensConsumed < 0 {
		out.TokensConsumed = 0
	}
	return out
}

// HasValidIdentity reports whether the payload carries the minimal persistable
// key semantics: a positive user, a non-empty conversation, a positive seq,
// and a non-empty kind.
func (p ChatTimelinePersistPayload) HasValidIdentity() bool {
	n := p.Normalize()
	if n.UserID <= 0 || n.Seq <= 0 {
		return false
	}
	return n.ConversationID != "" && n.Kind != ""
}
// MatchesStoredEvent reports whether the payload and a stored event can be
// treated as "the same business event".
//
// Notes:
//  1. Mainly used during outbox replay to recognize "unique-key conflict but actually already persisted";
//  2. Compares persisted fields only, never storage-side derived values such as created_at / id;
//  3. When it returns true, the caller may treat a seq conflict as idempotent success.
func (p ChatTimelinePersistPayload) MatchesStoredEvent(event AgentTimelineEvent) bool {
	n := p.Normalize()
	if event.UserID != n.UserID || event.Seq != n.Seq || event.TokensConsumed != n.TokensConsumed {
		return false
	}
	if strings.TrimSpace(event.ChatID) != n.ConversationID || strings.TrimSpace(event.Kind) != n.Kind {
		return false
	}
	return trimTimelinePointerString(event.Role) == n.Role &&
		trimTimelinePointerString(event.Content) == n.Content &&
		trimTimelinePointerString(event.Payload) == n.PayloadJSON
}
// IsTimelineSeqConflictError 判断 error 是否属于时间线 seq 唯一键冲突。
//
// 说明:
// 1. MySQL / PostgreSQL / SQLite 的重复键报错文案并不完全一致,这里用宽松文本匹配;
// 2. 该函数只用于“是否进入幂等/补 seq 分支”的判断,不承担精确错误分类职责;
// 3. 若未来统一抽数据库错误码适配层,应优先替换这里而不是继续复制判断逻辑。
func IsTimelineSeqConflictError(err error) bool {
if err == nil {
return false
}
lower := strings.ToLower(err.Error())
return strings.Contains(lower, "duplicate entry") ||
strings.Contains(lower, "duplicate key") ||
strings.Contains(lower, "unique constraint") ||
strings.Contains(lower, "unique violation") ||
strings.Contains(lower, "error 1062") ||
strings.Contains(lower, "uk_timeline_user_chat_seq")
}
// GetConversationTimelineItem defines one entry of the frontend's
// read-timeline API response.
type GetConversationTimelineItem struct {
	ID             int64          `json:"id,omitempty"`
	Seq            int64          `json:"seq"`
	Kind           string         `json:"kind"`
	Role           string         `json:"role,omitempty"`
	Content        string         `json:"content,omitempty"`
	Payload        map[string]any `json:"payload,omitempty"`
	TokensConsumed int            `json:"tokens_consumed,omitempty"`
	CreatedAt      *time.Time     `json:"created_at,omitempty"`
}
// trimTimelinePointerString normalizes an optional string column: a nil
// pointer becomes "", otherwise surrounding whitespace is trimmed.
func trimTimelinePointerString(value *string) string {
	result := ""
	if value != nil {
		result = strings.TrimSpace(*value)
	}
	return result
}

View File

@@ -0,0 +1,19 @@
package model
// UserImportCoursesRequest is the request body for importing a batch of
// courses in one call.
type UserImportCoursesRequest struct {
	Courses []UserCheckCourseRequest `json:"courses"`
}

// UserCheckCourseRequest describes one course to be checked/imported,
// together with its weekly time arrangements.
type UserCheckCourseRequest struct {
	CourseName   string `json:"course_name"`
	Location     string `json:"location"`
	IsAllowTasks bool   `json:"is_allow_tasks"`
	// Arrangements lists the course's time slots: week range, weekday,
	// section range, and odd/even week type.
	Arrangements []struct {
		StartWeek    int    `json:"start_week"`
		EndWeek      int    `json:"end_week"`
		DayOfWeek    int    `json:"day_of_week"`
		StartSection int    `json:"start_section"`
		EndSection   int    `json:"end_section"`
		WeekType     string `json:"week_type"`
	} `json:"arrangements"`
}

View File

@@ -0,0 +1,38 @@
package model
// CourseImageParseDraftStatus is the overall draft status produced by the
// course timetable image parsing flow (success / partial / reject).
type CourseImageParseDraftStatus string

// Allowed CourseImageParseDraftStatus values.
const (
	CourseImageParseDraftStatusSuccess CourseImageParseDraftStatus = "success"
	CourseImageParseDraftStatusPartial CourseImageParseDraftStatus = "partial"
	CourseImageParseDraftStatusReject  CourseImageParseDraftStatus = "reject"
)
// CourseImageParseRow is one parsed timetable row extracted from a course
// image. Pointer fields are nil when the corresponding value could not be
// recognized; Confidence and RowWarnings let the frontend surface
// low-quality rows for manual review.
type CourseImageParseRow struct {
	RowID        string   `json:"row_id"`
	CourseName   string   `json:"course_name"`
	Location     string   `json:"location"`
	IsAllowTasks bool     `json:"is_allow_tasks"`
	StartWeek    *int     `json:"start_week"`
	EndWeek      *int     `json:"end_week"`
	DayOfWeek    *int     `json:"day_of_week"`
	StartSection *int     `json:"start_section"`
	EndSection   *int     `json:"end_section"`
	WeekType     string   `json:"week_type"`
	Confidence   float64  `json:"confidence"`
	RawText      string   `json:"raw_text"`
	RowWarnings  []string `json:"row_warnings"`
}
// CourseImageParseResponse is the result of parsing a timetable image:
// an overall draft status, human-readable warnings, and the parsed rows.
type CourseImageParseResponse struct {
	DraftStatus CourseImageParseDraftStatus `json:"draft_status"`
	Message     string                      `json:"message"`
	Warnings    []string                    `json:"warnings"`
	Rows        []CourseImageParseRow       `json:"rows"`
}

// CourseImageParseRequest carries the raw uploaded image to be parsed.
// It is an internal transport struct, not a JSON request body.
type CourseImageParseRequest struct {
	Filename   string
	MIMEType   string
	ImageBytes []byte
}

View File

@@ -0,0 +1,165 @@
package model
import "time"
const (
	// MemoryItemStatusActive means the item participates in retrieval and injection.
	MemoryItemStatusActive = "active"
	// MemoryItemStatusArchived means the item is archived and no longer injected by default.
	MemoryItemStatusArchived = "archived"
	// MemoryItemStatusDeleted means the item is soft-deleted.
	MemoryItemStatusDeleted = "deleted"
)

const (
	// MemoryJobTypeExtract is the "candidate fact extraction" job.
	MemoryJobTypeExtract = "extract"
	// MemoryJobTypeEmbed is the "vector sync" job (reserved only in Day1).
	MemoryJobTypeEmbed = "embed"
	// MemoryJobTypeReconcile is the "conflict resolution" job (reserved only in Day1).
	MemoryJobTypeReconcile = "reconcile"
)

const (
	// MemoryJobStatusPending means the job is waiting to run.
	MemoryJobStatusPending = "pending"
	// MemoryJobStatusProcessing means the job is running.
	MemoryJobStatusProcessing = "processing"
	// MemoryJobStatusSuccess means the job finished successfully (terminal state).
	MemoryJobStatusSuccess = "success"
	// MemoryJobStatusFailed means the job failed but may be retried.
	MemoryJobStatusFailed = "failed"
	// MemoryJobStatusDead means the job failed unrecoverably (terminal state).
	MemoryJobStatusDead = "dead"
)
// MemoryItem maps the memory_items table and stores long-term injectable
// memories.
//
// Responsibility boundary:
//  1. This model only defines the storage shape; it carries no extraction or
//     decision business logic;
//  2. Day1 only creates the table and base columns, Day2 adds the read /
//     injection chain;
//  3. the vector columns (vector_status/vector_id) only bridge state — they
//     are not the source of truth of the vector store.
type MemoryItem struct {
	ID                int64      `gorm:"column:id;primaryKey;autoIncrement"`
	UserID            int        `gorm:"column:user_id;not null;index:idx_memory_items_user_status_type,priority:1;index:idx_memory_items_user_conv_status,priority:1;index:idx_memory_items_user_asst_run_status,priority:1;index:idx_memory_items_user_type_hash,priority:1;comment:用户ID"`
	ConversationID    *string    `gorm:"column:conversation_id;type:varchar(64);index:idx_memory_items_user_conv_status,priority:2;comment:会话ID"`
	AssistantID       *string    `gorm:"column:assistant_id;type:varchar(64);index:idx_memory_items_user_asst_run_status,priority:2;comment:助手ID"`
	RunID             *string    `gorm:"column:run_id;type:varchar(64);index:idx_memory_items_user_asst_run_status,priority:3;comment:运行ID"`
	MemoryType        string     `gorm:"column:memory_type;type:varchar(32);not null;index:idx_memory_items_user_status_type,priority:3;index:idx_memory_items_user_type_hash,priority:2;comment:preference/constraint/fact"`
	Title             string     `gorm:"column:title;type:varchar(128);not null;comment:记忆标题"`
	Content           string     `gorm:"column:content;type:text;not null;comment:记忆内容"`
	NormalizedContent *string    `gorm:"column:normalized_content;type:text;comment:标准化内容"`
	ContentHash       *string    `gorm:"column:content_hash;type:varchar(64);index:idx_memory_items_user_type_hash,priority:3;comment:幂等去重哈希"`
	Confidence        float64    `gorm:"column:confidence;type:decimal(5,4);not null;default:0.6;comment:置信度"`
	Importance        float64    `gorm:"column:importance;type:decimal(5,4);not null;default:0.5;comment:重要度"`
	SensitivityLevel  int        `gorm:"column:sensitivity_level;not null;default:0;comment:敏感级别"`
	SourceMessageID   *int64     `gorm:"column:source_message_id;index:idx_memory_items_source_message;comment:来源消息ID"`
	SourceEventID     *string    `gorm:"column:source_event_id;type:varchar(64);comment:来源事件ID"`
	IsExplicit        bool       `gorm:"column:is_explicit;not null;default:false;comment:是否显式记忆"`
	Status            string     `gorm:"column:status;type:varchar(16);not null;default:active;index:idx_memory_items_user_status_type,priority:2;index:idx_memory_items_user_conv_status,priority:3;index:idx_memory_items_user_asst_run_status,priority:4;comment:active/archived/deleted"`
	TTLAt             *time.Time `gorm:"column:ttl_at;index:idx_memory_items_ttl;comment:过期时间"`
	LastAccessAt      *time.Time `gorm:"column:last_access_at;comment:最后访问时间"`
	CreatedAt         *time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt         *time.Time `gorm:"column:updated_at;autoUpdateTime"`
	VectorStatus      string     `gorm:"column:vector_status;type:varchar(16);not null;default:pending;comment:pending/synced/failed"`
	VectorID          *string    `gorm:"column:vector_id;type:varchar(128);comment:向量库映射ID"`
}

// TableName names the backing table for MemoryItem.
func (MemoryItem) TableName() string {
	return "memory_items"
}
// MemoryJob maps the memory_jobs table and carries asynchronous work.
//
// Responsibility boundary:
//  1. This table is a "retryable state machine", not a business fact store;
//  2. payload_json holds only the minimal execution context of a job;
//  3. status/retry_count/next_retry_at together define retry behavior.
type MemoryJob struct {
	ID              int64      `gorm:"column:id;primaryKey;autoIncrement"`
	UserID          int        `gorm:"column:user_id;not null;index:idx_memory_jobs_user_created,priority:1;comment:用户ID"`
	ConversationID  *string    `gorm:"column:conversation_id;type:varchar(64);comment:会话ID"`
	SourceMessageID *int64     `gorm:"column:source_message_id;comment:来源消息ID"`
	SourceEventID   *string    `gorm:"column:source_event_id;type:varchar(64);index:idx_memory_jobs_source_event;comment:来源事件ID"`
	JobType         string     `gorm:"column:job_type;type:varchar(32);not null;comment:extract/embed/reconcile"`
	IdempotencyKey  string     `gorm:"column:idempotency_key;type:varchar(128);not null;uniqueIndex:uk_memory_jobs_idempotency;comment:幂等键"`
	PayloadJSON     string     `gorm:"column:payload_json;type:longtext;not null;comment:任务载荷JSON"`
	Status          string     `gorm:"column:status;type:varchar(16);not null;index:idx_memory_jobs_status_next,priority:1;comment:pending/processing/success/failed/dead"`
	RetryCount      int        `gorm:"column:retry_count;not null;default:0;comment:已重试次数"`
	MaxRetry        int        `gorm:"column:max_retry;not null;default:6;comment:最大重试次数"`
	NextRetryAt     *time.Time `gorm:"column:next_retry_at;index:idx_memory_jobs_status_next,priority:2;comment:下次重试时间"`
	LastError       *string    `gorm:"column:last_error;type:varchar(2000);comment:最后错误"`
	CreatedAt       *time.Time `gorm:"column:created_at;autoCreateTime;index:idx_memory_jobs_user_created,priority:2"`
	UpdatedAt       *time.Time `gorm:"column:updated_at;autoUpdateTime"`
}

// TableName names the backing table for MemoryJob.
func (MemoryJob) TableName() string {
	return "memory_jobs"
}
// MemoryAuditLog maps the memory_audit_logs table and records memory
// change audits (before/after snapshots with operator and reason).
type MemoryAuditLog struct {
	ID           int64      `gorm:"column:id;primaryKey;autoIncrement"`
	MemoryID     int64      `gorm:"column:memory_id;not null;index:idx_memory_audit_memory_id;comment:记忆ID"`
	UserID       int        `gorm:"column:user_id;not null;index:idx_memory_audit_user_id;comment:用户ID"`
	Operation    string     `gorm:"column:operation;type:varchar(32);not null;comment:create/update/archive/delete/restore"`
	OperatorType string     `gorm:"column:operator_type;type:varchar(16);not null;comment:system/user"`
	Reason       string     `gorm:"column:reason;type:varchar(255);not null;default:'';comment:操作原因"`
	BeforeJSON   *string    `gorm:"column:before_json;type:longtext;comment:变更前快照"`
	AfterJSON    *string    `gorm:"column:after_json;type:longtext;comment:变更后快照"`
	CreatedAt    *time.Time `gorm:"column:created_at;autoCreateTime"`
}

// TableName names the backing table for MemoryAuditLog.
func (MemoryAuditLog) TableName() string {
	return "memory_audit_logs"
}
// MemoryUserSetting maps the memory_user_settings table and stores
// per-user memory feature switches.
type MemoryUserSetting struct {
	UserID                 int        `gorm:"column:user_id;primaryKey;comment:用户ID"`
	MemoryEnabled          bool       `gorm:"column:memory_enabled;not null;default:true;comment:总开关"`
	ImplicitMemoryEnabled  bool       `gorm:"column:implicit_memory_enabled;not null;default:true;comment:隐式记忆开关"`
	SensitiveMemoryEnabled bool       `gorm:"column:sensitive_memory_enabled;not null;default:false;comment:敏感记忆开关"`
	UpdatedAt              *time.Time `gorm:"column:updated_at;autoUpdateTime"`
}

// TableName names the backing table for MemoryUserSetting.
func (MemoryUserSetting) TableName() string {
	return "memory_user_settings"
}
// MemoryExtractRequestedPayload is the payload of the
// memory.extract.requested(v1) event.
//
// Notes:
//  1. Day1 only carries the minimal executable fields;
//  2. assistant_id/run_id/source_message_id/trace_id may be empty and are
//     filled in by later chain stages;
//  3. idempotency_key is required, used for memory_jobs dedup and
//     side-effect-free consumption.
type MemoryExtractRequestedPayload struct {
	UserID          int       `json:"user_id"`
	ConversationID  string    `json:"conversation_id"`
	AssistantID     string    `json:"assistant_id,omitempty"`
	RunID           string    `json:"run_id,omitempty"`
	SourceMessageID int64     `json:"source_message_id,omitempty"`
	SourceRole      string    `json:"source_role"`
	SourceText      string    `json:"source_text"`
	OccurredAt      time.Time `json:"occurred_at"`
	TraceID         string    `json:"trace_id,omitempty"`
	IdempotencyKey  string    `json:"idempotency_key"`
}

View File

@@ -0,0 +1,105 @@
package model
import "time"
// MemoryGetItemRequest describes the minimal parameters needed to read
// one of the caller's own memory items.
type MemoryGetItemRequest struct {
	UserID   int
	MemoryID int64
}

// MemoryCreateItemRequest is the input for manually adding a memory item.
// UserID and OperatorType are injected server-side (json:"-") and never
// taken from the request body.
type MemoryCreateItemRequest struct {
	UserID           int        `json:"-"`
	ConversationID   string     `json:"conversation_id,omitempty"`
	AssistantID      string     `json:"assistant_id,omitempty"`
	RunID            string     `json:"run_id,omitempty"`
	MemoryType       string     `json:"memory_type"`
	Title            string     `json:"title"`
	Content          string     `json:"content"`
	Confidence       *float64   `json:"confidence,omitempty"`
	Importance       *float64   `json:"importance,omitempty"`
	SensitivityLevel *int       `json:"sensitivity_level,omitempty"`
	IsExplicit       *bool      `json:"is_explicit,omitempty"`
	TTLAt            *time.Time `json:"ttl_at,omitempty"`
	Reason           string     `json:"reason,omitempty"`
	OperatorType     string     `json:"-"`
}
// MemoryUpdateItemRequest is the Patch input for manually editing a memory
// item.
//
// Notes:
//  1. Pointer fields distinguish "field not sent" from "explicit zero value";
//  2. ClearTTL expresses "explicitly clear ttl_at";
//  3. only content-side fields may be modified — cross-user or ownership
//     fields are not writable here.
type MemoryUpdateItemRequest struct {
	UserID           int        `json:"-"`
	MemoryID         int64      `json:"-"`
	MemoryType       *string    `json:"memory_type,omitempty"`
	Title            *string    `json:"title,omitempty"`
	Content          *string    `json:"content,omitempty"`
	Confidence       *float64   `json:"confidence,omitempty"`
	Importance       *float64   `json:"importance,omitempty"`
	SensitivityLevel *int       `json:"sensitivity_level,omitempty"`
	IsExplicit       *bool      `json:"is_explicit,omitempty"`
	TTLAt            *time.Time `json:"ttl_at,omitempty"`
	ClearTTL         bool       `json:"clear_ttl,omitempty"`
	Reason           string     `json:"reason,omitempty"`
	OperatorType     string     `json:"-"`
}
// MemoryDeleteItemRequest is the input for deleting one of the caller's
// memory items.
type MemoryDeleteItemRequest struct {
	UserID       int
	MemoryID     int64
	Reason       string
	OperatorType string
}

// MemoryRestoreItemRequest is the input for restoring one of the caller's
// memory items.
type MemoryRestoreItemRequest struct {
	UserID       int
	MemoryID     int64
	Reason       string
	OperatorType string
}

// MemoryDedupCleanupRequest describes the execution parameters of the
// offline dedup-cleanup job. DryRun reports what would change without
// writing.
type MemoryDedupCleanupRequest struct {
	UserID       int
	Limit        int
	DryRun       bool
	Reason       string
	OperatorType string
}

// MemoryDedupCleanupResult summarizes one offline dedup-cleanup run.
type MemoryDedupCleanupResult struct {
	ScannedGroupCount int     `json:"scanned_group_count"`
	DedupedGroupCount int     `json:"deduped_group_count"`
	KeptCount         int     `json:"kept_count"`
	ArchivedCount     int     `json:"archived_count"`
	ArchivedIDs       []int64 `json:"archived_ids,omitempty"`
	DryRun            bool    `json:"dry_run"`
}
// MemoryItemView is the frontend-visible projection of a memory item
// (storage-only columns such as vector_status are not exposed).
type MemoryItemView struct {
	ID               int64      `json:"id"`
	UserID           int        `json:"user_id"`
	ConversationID   string     `json:"conversation_id,omitempty"`
	AssistantID      string     `json:"assistant_id,omitempty"`
	RunID            string     `json:"run_id,omitempty"`
	MemoryType       string     `json:"memory_type"`
	Title            string     `json:"title"`
	Content          string     `json:"content"`
	ContentHash      string     `json:"content_hash,omitempty"`
	Confidence       float64    `json:"confidence"`
	Importance       float64    `json:"importance"`
	SensitivityLevel int        `json:"sensitivity_level"`
	IsExplicit       bool       `json:"is_explicit"`
	Status           string     `json:"status"`
	TTLAt            *time.Time `json:"ttl_at,omitempty"`
	CreatedAt        *time.Time `json:"created_at,omitempty"`
	UpdatedAt        *time.Time `json:"updated_at,omitempty"`
}

View File

@@ -0,0 +1,48 @@
package model
import "time"
const (
	// OutboxStatusPending means the message is stored in the outbox and is waiting for delivery or for its retry window.
	OutboxStatusPending = "pending"
	// OutboxStatusPublished means the message was written to Kafka but business consumption has not finished yet.
	OutboxStatusPublished = "published"
	// OutboxStatusConsumed means the business handling of the message completed successfully (terminal state).
	OutboxStatusConsumed = "consumed"
	// OutboxStatusDead means the retry limit was reached or an unrecoverable error occurred (terminal state).
	OutboxStatusDead = "dead"
)
// AgentOutboxMessage is the outbox state-machine table model.
//
// Key notes:
//  1. EventType maps to the database `biz_type` column (kept for legacy
//     table compatibility — no DDL change);
//  2. Payload stores the unified event envelope JSON;
//  3. Status/RetryCount/NextRetryAt together form the retry state machine.
type AgentOutboxMessage struct {
	ID          int64      `gorm:"column:id;primaryKey;autoIncrement"`
	EventType   string     `gorm:"column:biz_type;type:varchar(64);not null;index:idx_outbox_status_next,priority:3;comment:事件类型"`
	ServiceName string     `gorm:"column:service_name;type:varchar(64);not null;default:'';index:idx_outbox_service_name,priority:1;comment:所属服务"`
	Topic       string     `gorm:"column:topic;type:varchar(128);not null;comment:Kafka Topic"`
	MessageKey  string     `gorm:"column:message_key;type:varchar(128);not null;comment:Kafka 消息键"`
	Payload     string     `gorm:"column:payload;type:longtext;not null;comment:业务载荷(JSON)"`
	Status      string     `gorm:"column:status;type:varchar(32);not null;index:idx_outbox_status_next,priority:1;comment:pending/published/consumed/dead"`
	RetryCount  int        `gorm:"column:retry_count;not null;default:0;comment:已重试次数"`
	MaxRetry    int        `gorm:"column:max_retry;not null;default:20;comment:最大重试次数"`
	NextRetryAt *time.Time `gorm:"column:next_retry_at;index:idx_outbox_status_next,priority:2;comment:下次重试时间"`
	LastError   *string    `gorm:"column:last_error;type:text;comment:最后一次错误"`
	PublishedAt *time.Time `gorm:"column:published_at;comment:投递到 Kafka 时间"`
	ConsumedAt  *time.Time `gorm:"column:consumed_at;comment:消费完成时间"`
	CreatedAt   *time.Time `gorm:"column:created_at;autoCreateTime"`
	UpdatedAt   *time.Time `gorm:"column:updated_at;autoUpdateTime"`
}

// TableName returns the default backing table for AgentOutboxMessage.
func (AgentOutboxMessage) TableName() string {
	// 1. Keep the historical default table name so non-outbox infrastructure callers do not break.
	// 2. Per-service multi-table routing is controlled explicitly via db.Table(...) in backend/infra/outbox.
	// 3. This preserves old code while not treating the shared single table as the end state.
	return "agent_outbox_messages"
}

View File

@@ -0,0 +1,194 @@
package model
import "time"
// ScheduleEvent is the metadata row of a schedule block (course or task).
type ScheduleEvent struct {
	ID       int     `gorm:"primaryKey;autoIncrement" json:"id"`
	UserID   int     `gorm:"column:user_id;index:idx_user_events;not null" json:"user_id"`
	Name     string  `gorm:"column:name;type:varchar(255);not null;comment:课程或任务名称" json:"name"`
	Location *string `gorm:"column:location;type:varchar(255);default:'';comment:地点 (教学楼/会议室)" json:"location"`
	Type     string  `gorm:"column:type;type:enum('course','task');not null;comment:日程类型" json:"type"`
	RelID    *int    `gorm:"column:rel_id;comment:关联原始数据ID (如教务系统的课程ID)" json:"rel_id"`
	// TaskSourceType marks, when type=task, which task source rel_id refers to.
	//
	// Responsibility boundary:
	//  1. It only expresses the data source of a task block; it does not change
	//     the course/task display semantics of Type;
	//  2. task_item means rel_id points at task_items.id, task_pool means rel_id
	//     points at tasks.id;
	//  3. course events keep the empty value; historical task events are
	//     backfilled by migration, so existing course logic is unaffected.
	TaskSourceType string `gorm:"column:task_source_type;type:varchar(32);not null;default:'';index:idx_schedule_event_task_source;comment:任务来源 task_item/task_pool" json:"task_source_type,omitempty"`
	// MakeupForEventID records the source event of a makeup block, used to
	// chain audits after a user reports "not completed".
	//
	// Notes:
	//  1. Only written when active scheduling generates a makeup block;
	//  2. it does not verify the target event still exists — the formal apply
	//     path must re-validate inside a transaction;
	//  3. nil means a normal block or a non-makeup block.
	MakeupForEventID *int `gorm:"column:makeup_for_event_id;index:idx_schedule_event_makeup_for;comment:补做块对应的原 schedule_event.id" json:"makeup_for_event_id,omitempty"`
	// ActivePreviewID records the active-scheduling preview that produced this
	// event, so a formal schedule row can be traced back to its trigger.
	//
	// Notes:
	//  1. This field exists only for audit and troubleshooting, not as a formal
	//     schedule key;
	//  2. preview details still belong to the active_schedule_previews table.
	ActivePreviewID *string   `gorm:"column:active_preview_id;type:varchar(64);index:idx_schedule_event_active_preview;comment:主动调度预览ID" json:"active_preview_id,omitempty"`
	CanBeEmbedded   bool      `gorm:"column:can_be_embedded;not null;default:0;comment:是否允许在此时段嵌入其他任务" json:"can_be_embedded"`
	StartTime       time.Time `gorm:"column:start_time;type:time;comment:开始时间" json:"start_time"`
	EndTime         time.Time `gorm:"column:end_time;type:time;comment:结束时间" json:"end_time"`
}
// Schedule is one atomic (user, week, day, section) slot row pointing at a
// ScheduleEvent, with an optional embedded task item.
type Schedule struct {
	ID             int  `gorm:"primaryKey;autoIncrement" json:"id"`
	EventID        int  `gorm:"column:event_id;index:idx_event_id;not null;comment:关联元数据ID" json:"event_id"`
	UserID         int  `gorm:"column:user_id;uniqueIndex:idx_user_slot_atomic,priority:1;not null;comment:冗余UID方便直接查询" json:"user_id"`
	Week           int  `gorm:"column:week;uniqueIndex:idx_user_slot_atomic,priority:2;not null;comment:周次 (1-25)" json:"week"`
	DayOfWeek      int  `gorm:"column:day_of_week;uniqueIndex:idx_user_slot_atomic,priority:3;not null;comment:星期 (1-7)" json:"day_of_week"`
	Section        int  `gorm:"column:section;uniqueIndex:idx_user_slot_atomic,priority:4;not null;comment:原子化节次 (1-12)" json:"section"`
	EmbeddedTaskID *int `gorm:"column:embedded_task_id;comment:若为水课嵌入记录具体的任务项ID" json:"embedded_task_id"`
	Status         string `gorm:"column:status;type:enum('normal','interrupted');default:'normal';comment:状态: 正常/因故中断" json:"status"`
	// The association below is required so GORM can join the event metadata.
	Event        *ScheduleEvent `gorm:"foreignKey:EventID" json:"event"`
	EmbeddedTask *TaskClassItem `gorm:"foreignKey:EmbeddedTaskID" json:"embedded_task"`
}
// ScheduleConflictDetail describes one conflicting schedule event together
// with the sections it occupies and any tasks embedded in them.
type ScheduleConflictDetail struct {
	EventID       int                    `json:"event_id"`
	Name          string                 `json:"name"`
	Location      string                 `json:"location"`
	DayOfWeek     int                    `json:"day_of_week"`
	Week          int                    `json:"week"`
	Sections      []int                  `json:"sections"`
	StartSection  int                    `json:"start_section"`
	EndSection    int                    `json:"end_section"`
	Type          string                 `json:"type"`
	EmbeddedTasks []ScheduleEmbeddedTask `json:"embedded_tasks"`
}

// ScheduleEmbeddedTask pairs a section with the task embedded in it.
type ScheduleEmbeddedTask struct {
	Section int `json:"section"`
	TaskID  int `json:"task_id"`
}
// UserTodaySchedule is the "today" view: current week/day plus the day's
// events.
type UserTodaySchedule struct {
	DayOfWeek int          `json:"day_of_week"`
	Week      int          `json:"week"`
	Events    []EventBrief `json:"events"`
}

// EventBrief is a compact event row for daily rendering.
type EventBrief struct {
	ID               int       `json:"id"`    // this is the ScheduleEvent ID, not the Schedule ID
	Order            int       `json:"order"` // display order among the day's events
	Name             string    `json:"name"`
	StartTime        string    `json:"start_time"`
	EndTime          string    `json:"end_time"`
	Location         string    `json:"location"`
	Type             string    `json:"type"`
	Span             int       `json:"span"` // number of sections spanned; the frontend uses it for width/height
	EmbeddedTaskInfo TaskBrief `json:"embedded_task_info,omitempty"`
}

// TaskBrief is the compact embedded-task payload inside an event row.
type TaskBrief struct {
	ID   int    `json:"id"` // this is the ScheduleEvent ID, not the Schedule ID
	Name string `json:"name"`
	/*StartTime string `json:"start_time"`
	EndTime   string `json:"end_time"`*/
	Type string `json:"type"`
}
// UserWeekSchedule is the weekly view: a week number plus its events.
type UserWeekSchedule struct {
	Week   int                `json:"week"`
	Events []WeeklyEventBrief `json:"events"`
}

// WeeklyEventBrief is a compact event row for weekly rendering.
type WeeklyEventBrief struct {
	ID               int       `json:"id"`    // this is the ScheduleEvent ID, not the Schedule ID
	Order            int       `json:"order"` // display order within a single day
	DayOfWeek        int       `json:"day_of_week"`
	Name             string    `json:"name"`
	StartTime        string    `json:"start_time"`
	EndTime          string    `json:"end_time"`
	Location         string    `json:"location"`
	Type             string    `json:"type"`
	Span             int       `json:"span"` // number of sections spanned; the frontend uses it for width/height
	Status           string    `json:"status"`
	EmbeddedTaskInfo TaskBrief `json:"embedded_task_info,omitempty"`
}

// UserDeleteScheduleEvent is the request for deleting a schedule event and,
// optionally, its course and/or embedded task.
type UserDeleteScheduleEvent struct {
	ID                 int  `json:"id"` // this is the ScheduleEvent ID, not the Schedule ID
	DeleteCourse       bool `json:"delete_course"`
	DeleteEmbeddedTask bool `json:"delete_embedded_task"`
}
// UserSmartPlanningMultiRequest is the request body of the "multi task-class
// smart rough-planning" API.
//
// Design notes:
//  1. TaskClassIDs must contain at least 1 task-class ID;
//  2. real usage is expected to pass >= 2 for mixed multi-class planning;
//  3. the service layer dedups and filters invalid values — the API layer
//     only performs basic binding validation.
type UserSmartPlanningMultiRequest struct {
	TaskClassIDs []int `json:"task_class_ids" binding:"required,min=1,dive,min=1"`
}

// UserRecentCompletedScheduleResponse lists recently completed events.
type UserRecentCompletedScheduleResponse struct {
	Events []RecentCompletedEventBrief `json:"events"`
}

// RecentCompletedEventBrief is one recently completed event row.
type RecentCompletedEventBrief struct {
	ID            int    `json:"id"` // for an embedded task event this is the TaskClassItem ID; for a course event it is the ScheduleEvent ID
	Name          string `json:"name"`
	Type          string `json:"type"`
	CompletedTime string `json:"completed_time"`
}

// OngoingSchedule is an upcoming/ongoing event snapshot.
type OngoingSchedule struct {
	ID         int       `json:"id"` // this is the ScheduleEvent ID, not the Schedule ID
	Name       string    `json:"name"`
	Location   string    `json:"location"`
	Type       string    `json:"type"`
	TimeStatus string    `json:"time_status"` // "upcoming", "ongoing"
	StartTime  time.Time `json:"start_time"`
	EndTime    time.Time `json:"end_time"`
}
// HybridScheduleEntry represents one time block of a "hybrid schedule".
//
// Design goal:
// unify existing schedule blocks (courses / persisted tasks) and
// rough-planned suggested tasks into one structure that the ReAct
// fine-planning engine can manipulate in memory.
//
// Status semantics:
//   - "existing": a confirmed block — the LLM may not move it;
//   - "suggested": a rough-planned task — the LLM may adjust its time via a Tool.
type HybridScheduleEntry struct {
	Week        int    `json:"week"`
	DayOfWeek   int    `json:"day_of_week"`
	SectionFrom int    `json:"section_from"`
	SectionTo   int    `json:"section_to"`
	Name        string `json:"name"`
	Type        string `json:"type"`                   // "course" | "task"
	Status      string `json:"status"`                 // "existing" | "suggested"
	TaskItemID  int    `json:"task_item_id,omitempty"` // set only for suggested tasks
	TaskClassID int    `json:"task_class_id,omitempty"` // set only for suggested tasks; maps to TaskClass.ID
	EventID     int    `json:"event_id,omitempty"`      // set only for existing entries
	// CanBeEmbedded marks whether this existing course block allows task
	// embedding. It is meaningful only for course entries; task entries
	// default to false.
	CanBeEmbedded bool `json:"can_be_embedded,omitempty"`
	// BlockForSuggested marks whether this entry should block suggested
	// task placement.
	//
	// Semantics:
	//  1. suggested entries default to true (tasks must not overlap);
	//  2. an existing course that is embeddable and whose slot is not yet
	//     occupied by an embedded task is false;
	//  3. an existing course that is not embeddable, or whose slot already
	//     carries an embedded task, is true.
	//
	// The tool layer uses this for conflict checks, so an embeddable course
	// slot is not mistaken for a hard conflict.
	BlockForSuggested bool `json:"block_for_suggested,omitempty"`
	// ContextTag is the cognitive-type label of a task, used only on
	// suggested entries. It drives "cognitive load distribution" during
	// intra-day optimization, e.g.:
	//  1. High-Logic: math, programming, logical reasoning;
	//  2. Memory: memorization/recitation;
	//  3. Review: revision/recap;
	//  4. General: generic tasks.
	ContextTag string `json:"context_tag,omitempty"`
}
// TableName maps ScheduleEvent to the schedule_events table.
func (ScheduleEvent) TableName() string {
	return "schedule_events"
}

// TableName maps Schedule to the schedules table.
func (Schedule) TableName() string {
	return "schedules"
}

View File

@@ -0,0 +1,203 @@
package model
import (
"database/sql/driver"
"encoding/json"
"fmt"
"time"
)
// TaskClass maps the task_classes table in the database.
type TaskClass struct {
	// section 1: identity
	ID     int  `gorm:"column:id;primaryKey;autoIncrement"`
	UserID *int `gorm:"column:user_id;index:idx_task_classes_user_id"`
	// section 2: descriptive attributes
	Name               *string    `gorm:"column:name;size:255"`
	Mode               *string    `gorm:"column:mode;type:enum('auto','manual')"`
	StartDate          *time.Time `gorm:"column:start_date"`
	EndDate            *time.Time `gorm:"column:end_date"`
	SubjectType        *string    `gorm:"column:subject_type;size:32;comment:学科类型 quantitative|memory|reading|mixed"`
	DifficultyLevel    *string    `gorm:"column:difficulty_level;size:16;comment:难度等级 low|medium|high"`
	CognitiveIntensity *string    `gorm:"column:cognitive_intensity;size:16;comment:认知强度 low|medium|high"`
	// section 3: planning configuration
	TotalSlots         *int     `gorm:"column:total_slots;comment:分配的总节数"`
	AllowFillerCourse  *bool    `gorm:"column:allow_filler_course;default:true"`
	Strategy           *string  `gorm:"column:strategy;type:enum('steady','rapid')"`
	ExcludedSlots      IntSlice `gorm:"column:excluded_slots;type:json;comment:不想要的时段切片"`
	ExcludedDaysOfWeek IntSlice `gorm:"column:excluded_days_of_week;type:json;comment:不想要的星期几切片(1-7)"`
	Items              []TaskClassItem `gorm:"foreignKey:CategoryID;references:ID"` // one-to-many: a TaskClass owns multiple TaskClassItem rows
}
// IntSlice 用于把 []int 以 JSON 形式存入/读出数据库 json 字段
type IntSlice []int
func (s IntSlice) Value() (driver.Value, error) {
// nil -> NULL空切片 -> "[]"
if s == nil {
return nil, nil
}
return json.Marshal([]int(s))
}
func (s *IntSlice) Scan(value any) error {
if value == nil {
*s = nil
return nil
}
var data []byte
switch v := value.(type) {
case []byte:
data = v
case string:
data = []byte(v)
default:
return fmt.Errorf("IntSlice: 不支持的扫描类型: %T", value)
}
var out []int
if err := json.Unmarshal(data, &out); err != nil {
return err
}
*s = IntSlice(out)
return nil
}
// TaskClassItem maps the task_items table in the database.
type TaskClassItem struct {
	// section 1: identity
	ID         int  `gorm:"column:id;primaryKey;autoIncrement"`
	CategoryID *int `gorm:"column:category_id"` // ID of the owning TaskClass
	// section 2: content and placement
	Order        *int        `gorm:"column:order"`
	Content      *string     `gorm:"column:content;type:text"`
	EmbeddedTime *TargetTime `gorm:"column:embedded_time;type:json;comment:目标时间{date,section_from,section_to}"`
	Status       *int        `gorm:"column:status;comment:1:未安排, 2:已应用"`
}
// UserAddTaskClassRequest is the request body for creating a task class.
type UserAddTaskClassRequest struct {
	Name               string                        `json:"name" binding:"required"`
	StartDate          string                        `json:"start_date" binding:"required"` // YYYY-MM-DD
	EndDate            string                        `json:"end_date" binding:"required"`   // YYYY-MM-DD
	Mode               string                        `json:"mode" binding:"required,oneof=auto manual"`
	SubjectType        string                        `json:"subject_type,omitempty"`
	DifficultyLevel    string                        `json:"difficulty_level,omitempty"`
	CognitiveIntensity string                        `json:"cognitive_intensity,omitempty"`
	Config             UserAddTaskClassConfig        `json:"config" binding:"required"`
	Items              []UserAddTaskClassItemRequest `json:"items" binding:"required"`
}

// UserAddTaskClassConfig is the configuration section of a task-class
// creation request.
type UserAddTaskClassConfig struct {
	TotalSlots         int    `json:"total_slots" binding:"required,min=1"`
	AllowFillerCourse  bool   `json:"allow_filler_course"`
	Strategy           string `json:"strategy" binding:"required,oneof=steady rapid"`
	ExcludedSlots      []int  `json:"excluded_slots"`
	ExcludedDaysOfWeek []int  `json:"excluded_days_of_week"`
}
// UserAddTaskClassItemRequest is the per-item section of a task-class
// creation request.
type UserAddTaskClassItemRequest struct {
	ID           int         `json:"id,omitempty"` // database primary key of the item; returned on reads, omitted on create
	Order        int         `json:"order" binding:"required,min=1"`
	Content      string      `json:"content" binding:"required"`
	EmbeddedTime *TargetTime `json:"embedded_time"` // e.g. 2025-12-22 sections 1-2; nil means unscheduled
}

// TargetTime is the target time of a task item.
type TargetTime struct {
	Week        int `json:"week"`         // week number
	DayOfWeek   int `json:"day_of_week"`  // weekday
	SectionFrom int `json:"section_from"` // first section
	SectionTo   int `json:"section_to"`   // last section
}
// UserGetTaskClassesResponse lists the user's task classes with brief
// information.
type UserGetTaskClassesResponse struct {
	TaskClasses []TaskClassSummary `json:"task_classes"`
}

// TaskClassSummary provides brief information about one task class.
type TaskClassSummary struct {
	ID                 int       `json:"id"`
	Name               string    `json:"name"`
	Mode               string    `json:"mode"`
	Strategy           string    `json:"strategy"`
	StartDate          time.Time `json:"start_date"`
	EndDate            time.Time `json:"end_date"`
	TotalSlots         int       `json:"total_slots"`
	SubjectType        string    `json:"subject_type,omitempty"`
	DifficultyLevel    string    `json:"difficulty_level,omitempty"`
	CognitiveIntensity string    `json:"cognitive_intensity,omitempty"`
}
// UserInsertTaskClassItemToScheduleRequest places one task item into a
// concrete (week, day, section range) slot.
type UserInsertTaskClassItemToScheduleRequest struct {
	Week               int `json:"week" binding:"required,min=1"`
	DayOfWeek          int `json:"day_of_week" binding:"required,min=1,max=7"`
	StartSection       int `json:"start_section" binding:"required,min=1"`
	EndSection         int `json:"end_section" binding:"required,min=1,gtefield=StartSection"`
	EmbedCourseEventID int `json:"embed_course_event_id"` // optional; ID of the course schedule event to embed into
}

// UserInsertTaskClassItemToScheduleRequestBatch places several items of a
// task class in one request.
type UserInsertTaskClassItemToScheduleRequestBatch struct {
	TaskClassID int                   `json:"task_class_id" binding:"required"`
	Items       []SingleTaskClassItem `json:"items" binding:"required,dive,required"`
}

// SingleTaskClassItem is one placement inside a batch insert request.
type SingleTaskClassItem struct {
	TaskItemID         int `json:"task_item_id" binding:"required"`
	Week               int `json:"week" binding:"required,min=1"`
	DayOfWeek          int `json:"day_of_week" binding:"required,min=1,max=7"`
	StartSection       int `json:"start_section" binding:"required,min=1"`
	EndSection         int `json:"end_section" binding:"required,min=1,gtefield=StartSection"`
	EmbedCourseEventID int `json:"embed_course_event_id"` // optional; ID of the course schedule event to embed into
}
// Value implements driver.Valuer so GORM can persist TargetTime as JSON.
// A nil receiver is stored as SQL NULL.
func (t *TargetTime) Value() (driver.Value, error) {
	if t == nil {
		return nil, nil
	}
	// Marshal to a JSON byte slice so GORM can store it in a text/json column.
	return json.Marshal(t)
}

// Scan implements sql.Scanner and restores TargetTime from a database value.
// NULL leaves the receiver's zero value untouched (callers decide how to
// treat it); []byte and string inputs are decoded as JSON.
func (t *TargetTime) Scan(value any) error {
	if value == nil {
		return nil
	}
	switch src := value.(type) {
	case []byte:
		return json.Unmarshal(src, t)
	case string:
		return json.Unmarshal([]byte(src), t)
	default:
		return fmt.Errorf("TargetTime: 不支持的扫描类型: %T", value)
	}
}
// TableName names the database table backing TaskClass.
func (TaskClass) TableName() string {
	return "task_classes"
}

// TableName names the database table backing TaskClassItem.
func (TaskClassItem) TableName() string {
	return "task_items"
}
// Task item status constants.
const (
	TaskItemStatusUnscheduled = 1 // not yet scheduled
	TaskItemStatusApplied     = 2 // applied to the schedule
)

View File

@@ -0,0 +1,199 @@
package model
import "time"
// Task is the domain model of the tasks table.
//
// Responsibility boundary:
//  1. Maps the tasks table columns;
//  2. does not validate API input or enforce business rules;
//  3. does not execute the "auto promotion" itself (that is owned by the
//     Service + Outbox event chain).
type Task struct {
	// 1. Primary key.
	ID int `gorm:"primaryKey;autoIncrement"`
	// 2. Owning user ID.
	//  2.1 The single-column index serves ordinary per-user task queries;
	//  2.2 it is also the leftmost prefix of the "lazy promotion" composite index.
	UserID int `gorm:"column:user_id;index;index:idx_user_done_threshold_priority,priority:1"`
	// 3. Task title.
	Title string `gorm:"type:varchar(255)"`
	// 4. Eisenhower quadrant priority:
	//  4.1 1 = important and urgent;
	//  4.2 2 = important, not urgent;
	//  4.3 3 = easy, not important;
	//  4.4 4 = neither easy nor important.
	//
	// Note: this column participates in the "lazy promotion" composite index.
	Priority int `gorm:"not null;index:idx_user_done_threshold_priority,priority:4"`
	// 5. Completion state.
	//
	// Note: completed tasks do not take part in auto promotion; the column
	// participates in the composite index.
	IsCompleted bool `gorm:"column:is_completed;default:false;index:idx_user_done_threshold_priority,priority:2"`
	// 6. Business deadline of the task.
	DeadlineAt *time.Time `gorm:"column:deadline_at"`
	// 7. Urgency cutoff time (auto-promotion threshold).
	//
	// Rules:
	//  7.1 Once this time is reached the task may be promoted automatically
	//      from a "non-urgent quadrant" to an "urgent quadrant";
	//  7.2 the value is supplied upstream (e.g. LLM planning) and is never
	//      inferred in the model layer;
	//  7.3 nil means the task does not take part in auto promotion;
	//  7.4 the column participates in the "lazy promotion" composite index.
	UrgencyThresholdAt *time.Time `gorm:"column:urgency_threshold_at;index:idx_user_done_threshold_priority,priority:3"`
	// 8. Estimated number of sections the task occupies.
	//
	// Notes:
	//  8.1 Active scheduling only consumes this value and never re-infers
	//      task complexity at scheduling time;
	//  8.2 the MVP valid range is 1~4; the model layer only supplies the
	//      default — truncation is owned by the active-scheduling context
	//      builder;
	//  8.3 defaults to 1 section, compatible with historical tasks and tasks
	//      without an explicit value.
	EstimatedSections int `gorm:"column:estimated_sections;not null;default:1"`
}
// NormalizeEstimatedSections clamps the estimated section count to the MVP
// allowed range.
//
// Responsibility boundary:
//  1. Only handles the default value and out-of-range clamping — it judges
//     no business priority and does not care who the caller is;
//  2. nil, 0 and negatives all fall back to 1, values above 4 clamp to 4,
//     so write and read-back stay consistent.
func NormalizeEstimatedSections(raw *int) int {
	const (
		minSections = 1
		maxSections = 4
	)
	if raw == nil {
		return minSections
	}
	switch v := *raw; {
	case v < minSections:
		return minSections
	case v > maxSections:
		return maxSections
	default:
		return v
	}
}
// UserAddTaskResponse is the response body of the "add task" API.
type UserAddTaskResponse struct {
	ID                int        `json:"id"`
	Title             string     `json:"title"`
	PriorityGroup     int        `json:"priority_group"`
	EstimatedSections int        `json:"estimated_sections"`
	DeadlineAt        *time.Time `json:"deadline_at"`
	Status            string     `json:"status"`
	CreatedAt         time.Time  `json:"created_at"`
}

// UserAddTaskRequest is the request body of the "add task" API.
type UserAddTaskRequest struct {
	Title              string     `json:"title"`
	PriorityGroup      int        `json:"priority_group"`
	EstimatedSections  int        `json:"estimated_sections"`
	DeadlineAt         *time.Time `json:"deadline_at"`
	UrgencyThresholdAt *time.Time `json:"urgency_threshold_at"`
}
// UserCompleteTaskRequest is the request body of the "mark task completed"
// API.
//
// Responsibility boundary:
//  1. Only carries the target task ID;
//  2. never carries user_id (user_id is always injected by the auth
//     middleware to prevent privilege escalation).
type UserCompleteTaskRequest struct {
	TaskID int `json:"task_id"`
}

// UserCompleteTaskResponse is the response body of the "mark task
// completed" API.
//
// Field semantics:
//  1. TaskID: the target task of this operation;
//  2. IsCompleted: completion state after the operation (always true on
//     success);
//  3. AlreadyCompleted:
//     3.1 true — the task was already completed, this request hit the
//     idempotent path;
//     3.2 false — the task switched from incomplete to completed;
//  4. Status: a short status message for the frontend.
type UserCompleteTaskResponse struct {
	TaskID           int    `json:"task_id"`
	IsCompleted      bool   `json:"is_completed"`
	AlreadyCompleted bool   `json:"already_completed"`
	Status           string `json:"status"`
}
// UserUndoCompleteTaskRequest is the request body of the "uncheck task
// completed" API.
//
// Responsibility boundary:
//  1. Only carries the target task_id;
//  2. never carries user_id (user_id is always injected by the auth
//     middleware to prevent cross-user operations).
type UserUndoCompleteTaskRequest struct {
	TaskID int `json:"task_id"`
}

// UserUndoCompleteTaskResponse is the response body of the "uncheck task
// completed" API.
//
// Field semantics:
//  1. TaskID: the target task of this operation;
//  2. IsCompleted: completion state after the operation (always false on
//     success);
//  3. Status: a short status message for the frontend.
type UserUndoCompleteTaskResponse struct {
	TaskID      int    `json:"task_id"`
	IsCompleted bool   `json:"is_completed"`
	Status      string `json:"status"`
}
// GetUserTaskResp is the per-task row returned by the task read APIs.
type GetUserTaskResp struct {
	ID                 int    `json:"id"`
	UserID             int    `json:"user_id"`
	Title              string `json:"title"`
	PriorityGroup      int    `json:"priority_group"`
	EstimatedSections  int    `json:"estimated_sections"`
	Status             string `json:"status"`
	Deadline           string `json:"deadline"`
	IsCompleted        bool   `json:"is_completed"`
	UrgencyThresholdAt string `json:"urgency_threshold_at,omitempty"`
}
// BatchTaskStatusRequest is the request body of the batch task-status
// query.
//
// Responsibility boundary:
//  1. Only carries the task IDs the frontend extracted from history cards;
//  2. never carries user_id (identity must come from the auth context to
//     prevent cross-user queries);
//  3. does not require that every task exist — missing or inaccessible
//     tasks are silently filtered by the Service.
type BatchTaskStatusRequest struct {
	IDs []int `json:"ids"`
}

// BatchTaskStatusItem is the current completion-state snapshot of a single
// task.
//
// Notes:
//  1. The Task model currently has no UpdatedAt column, so only
//     id/is_completed are returned;
//  2. this structure expresses "current state" and is never written back
//     into NewAgent timeline history payloads.
type BatchTaskStatusItem struct {
	ID          int  `json:"id"`
	IsCompleted bool `json:"is_completed"`
}

// BatchTaskStatusResponse is the response body of the batch task-status
// query.
//
// Responsibility boundary:
//  1. items only contains tasks the logged-in user may access and that
//     still exist;
//  2. when ids is empty, all IDs are invalid, or nothing matches, items is
//     an empty slice rather than a business error.
type BatchTaskStatusResponse struct {
	Items []BatchTaskStatusItem `json:"items"`
}
// UserUpdateTaskRequest is the request body of the "update task
// attributes" API.
//
// Responsibility boundary:
//  1. Pointer fields carry "partial update" semantics — nil means leave
//     unchanged, non-nil means set to the given value;
//  2. TaskID is required;
//  3. never carries user_id (injected by the auth middleware to prevent
//     privilege escalation).
type UserUpdateTaskRequest struct {
	TaskID             int        `json:"task_id"`
	Title              *string    `json:"title"`
	PriorityGroup      *int       `json:"priority_group"`
	DeadlineAt         *time.Time `json:"deadline_at"`
	UrgencyThresholdAt *time.Time `json:"urgency_threshold_at"`
}
// TaskUrgencyPromoteRequestedPayload is the payload of the "task urgency
// promotion requested" event.
//
// Responsibility boundary:
//  1. Only expresses "which user's which tasks should attempt promotion";
//  2. contains no outbox/kafka protocol fields (those are wrapped uniformly
//     by the infrastructure layer);
//  3. TriggeredAt only records the trigger time — whether an update happens
//     is still decided by the database conditions at consumption time.
type TaskUrgencyPromoteRequestedPayload struct {
	UserID      int       `json:"user_id"`
	TaskIDs     []int     `json:"task_ids"`
	TriggeredAt time.Time `json:"triggered_at"`
}