Version: 0.9.69.dev.260504
后端: 1. 阶段 4 active-scheduler 服务边界落地,新增 `cmd/active-scheduler`、`services/active_scheduler`、`shared/contracts/activescheduler` 和 active-scheduler port,迁移 dry-run、trigger、preview、confirm zrpc 能力 2. active-scheduler outbox consumer、relay、retry loop 和 due job scanner 迁入独立服务入口,gateway `/active-schedule/*` 改为通过 zrpc client 调用 3. gateway 目录收口为 `gateway/api` + `gateway/client`,统一归档 userauth、notification、active-scheduler 的 HTTP 门面和 zrpc client 4. 将旧 `backend/active_scheduler` 领域核心下沉到 `services/active_scheduler/core`,清退旧根目录活跃实现,并补充 active-scheduler 启动期跨域依赖表检查 5. 调整单体启动与 outbox 归属,`cmd/all` 不再启动 active-scheduler workflow、scanner 或 handler 文档: 1. 更新微服务迁移计划,将阶段 4 active-scheduler 标记为首轮收口完成,并明确下一阶段进入 schedule / task / course / task-class
This commit is contained in:
314
backend/services/active_scheduler/core/adapters/gorm_readers.go
Normal file
314
backend/services/active_scheduler/core/adapters/gorm_readers.go
Normal file
@@ -0,0 +1,314 @@
|
||||
package adapters
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// GormReaders is the read-only adapter behind the active-schedule dry-run.
//
// Responsibility boundaries:
// 1. Only the adapter layer reads the existing tables directly, converting
//    external domain models into active-schedule facts;
// 2. It never generates candidates, writes previews, or writes confirmed
//    schedule rows;
// 3. When the microservice split happens it can be replaced by an RPC /
//    read-model adapter without touching the active_scheduler main path.
type GormReaders struct {
	db *gorm.DB // shared gorm handle; nil until NewGormReaders is called
}
|
||||
|
||||
func NewGormReaders(db *gorm.DB) *GormReaders {
|
||||
return &GormReaders{db: db}
|
||||
}
|
||||
|
||||
func ReadersFromGorm(readers *GormReaders) ports.Readers {
|
||||
return ports.Readers{
|
||||
TaskReader: readers,
|
||||
ScheduleReader: readers,
|
||||
FeedbackReader: readers,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *GormReaders) ensureDB() error {
|
||||
if r == nil || r.db == nil {
|
||||
return errors.New("主动调度 GormReaders 未初始化")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetTaskForActiveSchedule loads one task owned by the requesting user and
// converts it into the TaskFact consumed by dry-run.
//
// Returns (fact, true, nil) on a hit, (zero, false, nil) when the ids are
// invalid or the row is missing, and a non-nil error only for real storage
// failures.
func (r *GormReaders) GetTaskForActiveSchedule(ctx context.Context, req ports.TaskRequest) (ports.TaskFact, bool, error) {
	if err := r.ensureDB(); err != nil {
		return ports.TaskFact{}, false, err
	}
	// Non-positive ids are treated as "not found" rather than as an error.
	if req.UserID <= 0 || req.TaskID <= 0 {
		return ports.TaskFact{}, false, nil
	}

	var task model.Task
	err := r.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", req.TaskID, req.UserID).
		First(&task).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return ports.TaskFact{}, false, nil
		}
		return ports.TaskFact{}, false, err
	}

	// Clamp the stored estimate into [1, 4] sections so downstream candidate
	// generation always sees a sane duration.
	estimatedSections := task.EstimatedSections
	if estimatedSections <= 0 {
		estimatedSections = 1
	}
	if estimatedSections > 4 {
		estimatedSections = 4
	}
	return ports.TaskFact{
		ID:                 task.ID,
		UserID:             task.UserID,
		Title:              task.Title,
		Priority:           task.Priority,
		IsCompleted:        task.IsCompleted,
		DeadlineAt:         task.DeadlineAt,
		UrgencyThresholdAt: task.UrgencyThresholdAt,
		EstimatedSections:  estimatedSections,
	}, true, nil
}
|
||||
|
||||
// GetScheduleFactsByWindow assembles the schedule facts for the half-open
// window [WindowStart, WindowEnd): every slot in the window, which slots are
// occupied, which are free, the per-event slot lists, and whether the trigger
// target is already on the schedule.
//
// An invalid request (non-positive user, zero start, or end not after start)
// yields the zero facts with a nil error.
func (r *GormReaders) GetScheduleFactsByWindow(ctx context.Context, req ports.ScheduleWindowRequest) (ports.ScheduleWindowFacts, error) {
	if err := r.ensureDB(); err != nil {
		return ports.ScheduleWindowFacts{}, err
	}
	if req.UserID <= 0 || req.WindowStart.IsZero() || !req.WindowEnd.After(req.WindowStart) {
		return ports.ScheduleWindowFacts{}, nil
	}

	// Expand the window into per-section slots, then query only the weeks the
	// window actually touches.
	windowSlots, err := buildWindowSlots(req.WindowStart, req.WindowEnd)
	if err != nil {
		return ports.ScheduleWindowFacts{}, err
	}
	weeks := uniqueWeeks(windowSlots)

	var schedules []model.Schedule
	if len(weeks) > 0 {
		err = r.db.WithContext(ctx).
			Preload("Event").
			Where("user_id = ? AND week IN ?", req.UserID, weeks).
			Find(&schedules).Error
		if err != nil {
			return ports.ScheduleWindowFacts{}, err
		}
	}

	// Index schedule rows by slot key and aggregate slots per event. Rows
	// without a preloaded Event, or whose slot falls outside the window, are
	// skipped.
	occupiedByKey := make(map[string]model.Schedule, len(schedules))
	eventFacts := make(map[int]*ports.ScheduleEventFact)
	targetAlreadyScheduled := false
	for _, schedule := range schedules {
		if schedule.Event == nil {
			continue
		}
		slot, ok := slotFromSchedule(schedule)
		if !ok || slot.StartAt.Before(req.WindowStart) || !slot.StartAt.Before(req.WindowEnd) {
			continue
		}
		occupiedByKey[slotKey(slot)] = schedule
		eventFact := eventFacts[schedule.EventID]
		if eventFact == nil {
			eventFact = scheduleToEventFact(schedule)
			eventFacts[schedule.EventID] = eventFact
		}
		eventFact.Slots = append(eventFact.Slots, slot)
		if isSameTarget(schedule.Event, req.TargetType, req.TargetID) {
			targetAlreadyScheduled = true
		}
	}

	// Partition the window slots into occupied vs free using the index built
	// above.
	occupiedSlots := make([]ports.Slot, 0, len(occupiedByKey))
	freeSlots := make([]ports.Slot, 0, len(windowSlots))
	for _, slot := range windowSlots {
		if schedule, exists := occupiedByKey[slotKey(slot)]; exists {
			occupied, ok := slotFromSchedule(schedule)
			if ok {
				occupiedSlots = append(occupiedSlots, occupied)
			}
			continue
		}
		freeSlots = append(freeSlots, slot)
	}

	// NOTE(review): events is populated by iterating a map, so its order is
	// random; firstDynamicTask therefore picks a nondeterministic event when
	// several dynamic tasks qualify — confirm whether callers depend on order.
	events := make([]ports.ScheduleEventFact, 0, len(eventFacts))
	for _, fact := range eventFacts {
		events = append(events, *fact)
	}
	return ports.ScheduleWindowFacts{
		Events:                 events,
		OccupiedSlots:          occupiedSlots,
		FreeSlots:              freeSlots,
		NextDynamicTask:        firstDynamicTask(events, req.Now),
		TargetAlreadyScheduled: targetAlreadyScheduled,
	}, nil
}
|
||||
|
||||
// GetFeedbackSignal resolves a feedback request to a concrete schedule event
// when possible, otherwise returns a fact with TargetKnown=false.
func (r *GormReaders) GetFeedbackSignal(ctx context.Context, req ports.FeedbackRequest) (ports.FeedbackFact, bool, error) {
	if err := r.ensureDB(); err != nil {
		return ports.FeedbackFact{}, false, err
	}
	// 1. The first version has no dedicated feedback table; when a
	//    schedule_event target is passed explicitly, that event is treated as
	//    the located feedback target.
	// 2. When the target cannot be located, return found=true with
	//    TargetKnown=false so the observe stage degrades stably to ask_user.
	if req.TargetType != string(trigger.TargetTypeScheduleEvent) || req.TargetID <= 0 {
		return ports.FeedbackFact{
			FeedbackID:  firstNonEmpty(req.FeedbackID, req.IdempotencyKey),
			TargetKnown: false,
			SubmittedAt: time.Now(),
		}, true, nil
	}

	var event model.ScheduleEvent
	err := r.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", req.TargetID, req.UserID).
		First(&event).Error
	if err != nil {
		// A missing event degrades to TargetKnown=false instead of failing.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return ports.FeedbackFact{
				FeedbackID:  firstNonEmpty(req.FeedbackID, req.IdempotencyKey),
				TargetKnown: false,
				SubmittedAt: time.Now(),
			}, true, nil
		}
		return ports.FeedbackFact{}, false, err
	}
	taskItemID := 0
	if event.RelID != nil {
		taskItemID = *event.RelID
	}
	return ports.FeedbackFact{
		FeedbackID:       firstNonEmpty(req.FeedbackID, req.IdempotencyKey),
		TargetKnown:      true,
		TargetEventID:    event.ID,
		TargetTaskItemID: taskItemID,
		TargetTitle:      event.Name,
		SubmittedAt:      time.Now(),
	}, true, nil
}
|
||||
|
||||
// buildWindowSlots expands the half-open window [startAt, endAt) into
// per-section slots, walking day by day and sections 1..12 within each day.
//
// Sections whose real start time falls outside the window are skipped; any
// error from the relative-date conversion helpers aborts the whole build.
func buildWindowSlots(startAt, endAt time.Time) ([]ports.Slot, error) {
	slots := make([]ports.Slot, 0, 24)
	for day := truncateToDate(startAt); day.Before(endAt); day = day.AddDate(0, 0, 1) {
		// Map the calendar day onto the timetable's (week, dayOfWeek) grid.
		week, dayOfWeek, err := conv.RealDateToRelativeDate(day.Format(conv.DateFormat))
		if err != nil {
			return nil, err
		}
		for section := 1; section <= 12; section++ {
			sectionStart, sectionEnd, err := conv.RelativeTimeToRealTime(week, dayOfWeek, section, section)
			if err != nil {
				return nil, err
			}
			// Keep only sections starting inside [startAt, endAt).
			if sectionStart.Before(startAt) || !sectionStart.Before(endAt) {
				continue
			}
			slots = append(slots, ports.Slot{
				Week:      week,
				DayOfWeek: dayOfWeek,
				Section:   section,
				StartAt:   sectionStart,
				EndAt:     sectionEnd,
			})
		}
	}
	return slots, nil
}
|
||||
|
||||
func slotFromSchedule(schedule model.Schedule) (ports.Slot, bool) {
|
||||
startAt, endAt, err := conv.RelativeTimeToRealTime(schedule.Week, schedule.DayOfWeek, schedule.Section, schedule.Section)
|
||||
if err != nil {
|
||||
return ports.Slot{}, false
|
||||
}
|
||||
return ports.Slot{
|
||||
Week: schedule.Week,
|
||||
DayOfWeek: schedule.DayOfWeek,
|
||||
Section: schedule.Section,
|
||||
StartAt: startAt,
|
||||
EndAt: endAt,
|
||||
}, true
|
||||
}
|
||||
|
||||
func scheduleToEventFact(schedule model.Schedule) *ports.ScheduleEventFact {
|
||||
event := schedule.Event
|
||||
relID := 0
|
||||
if event.RelID != nil {
|
||||
relID = *event.RelID
|
||||
}
|
||||
sourceType := event.TaskSourceType
|
||||
if sourceType == "" && event.Type == "task" {
|
||||
sourceType = string(trigger.TargetTypeTaskItem)
|
||||
}
|
||||
return &ports.ScheduleEventFact{
|
||||
ID: event.ID,
|
||||
UserID: event.UserID,
|
||||
Title: event.Name,
|
||||
SourceType: sourceType,
|
||||
RelID: relID,
|
||||
IsDynamicTask: event.Type == "task",
|
||||
TaskItemID: relID,
|
||||
}
|
||||
}
|
||||
|
||||
func isSameTarget(event *model.ScheduleEvent, targetType string, targetID int) bool {
|
||||
if event == nil || targetID <= 0 || event.RelID == nil || event.Type != "task" {
|
||||
return false
|
||||
}
|
||||
sourceType := event.TaskSourceType
|
||||
if sourceType == "" {
|
||||
sourceType = string(trigger.TargetTypeTaskItem)
|
||||
}
|
||||
return sourceType == targetType && *event.RelID == targetID
|
||||
}
|
||||
|
||||
// firstDynamicTask picks a dynamic-task event from the window facts.
//
// An event qualifies as soon as one of its slots has a zero StartAt or a
// StartAt that is not before now (i.e. still upcoming at `now`).
// NOTE(review): the caller builds `events` from map iteration, so when more
// than one dynamic task qualifies the returned one is nondeterministic —
// confirm whether callers rely on ordering.
func firstDynamicTask(events []ports.ScheduleEventFact, now time.Time) *ports.ScheduleEventFact {
	for i := range events {
		if !events[i].IsDynamicTask {
			continue
		}
		for _, slot := range events[i].Slots {
			if slot.StartAt.IsZero() || !slot.StartAt.Before(now) {
				return &events[i]
			}
		}
	}
	return nil
}
|
||||
|
||||
func uniqueWeeks(slots []ports.Slot) []int {
|
||||
seen := make(map[int]struct{})
|
||||
weeks := make([]int, 0)
|
||||
for _, slot := range slots {
|
||||
if _, exists := seen[slot.Week]; exists {
|
||||
continue
|
||||
}
|
||||
seen[slot.Week] = struct{}{}
|
||||
weeks = append(weeks, slot.Week)
|
||||
}
|
||||
return weeks
|
||||
}
|
||||
|
||||
// slotKey builds the "week:day:section" string used to index occupied slots.
func slotKey(slot ports.Slot) string {
	return fmt.Sprintf("%d:%d:%d", slot.Week, slot.DayOfWeek, slot.Section)
}
|
||||
|
||||
func truncateToDate(t time.Time) time.Time {
|
||||
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
|
||||
}
|
||||
|
||||
// firstNonEmpty returns the first value that is not the empty string, or ""
// when every value (or the list itself) is empty.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if len(candidate) > 0 {
			return candidate
		}
	}
	return ""
}
|
||||
261
backend/services/active_scheduler/core/apply/convert.go
Normal file
261
backend/services/active_scheduler/core/apply/convert.go
Normal file
@@ -0,0 +1,261 @@
|
||||
package apply
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
)
|
||||
|
||||
// Raw change-type aliases as they may appear in persisted preview JSON.
const (
	rawChangeTypeAdd  ChangeType = "add"
	rawChangeTypeNone ChangeType = "none"
)

// candidateSnapshot is the minimal view of one preview candidate needed by
// confirm: its id, its type, and the changes it proposes.
type candidateSnapshot struct {
	CandidateID   string        // unique id referenced by ConfirmRequest.CandidateID
	CandidateType ChangeType    // candidate-level change type; fallback for untyped changes
	Changes       []ApplyChange // proposed changes parsed from preview JSON
}
|
||||
|
||||
// ConvertConfirmToApplyRequest converts a preview candidate plus a confirm
// request into a formal apply request.
//
// Responsibility boundaries:
// 1. Only reads the preview snapshot passed in by the caller; never touches
//    the database directly;
// 2. Handles candidate lookup, edited_changes overrides, scope validation,
//    idempotency digests and ApplyCommand generation;
// 3. Does not write schedules and does not re-check current task/schedule
//    truth — that is done by ScheduleApplyPort.
func ConvertConfirmToApplyRequest(preview model.ActiveSchedulePreview, req ConfirmRequest, now time.Time) (*ApplyActiveScheduleRequest, error) {
	// Fill request defaults before any validation.
	if req.PreviewID == "" {
		req.PreviewID = preview.ID
	}
	if req.Action == "" {
		req.Action = ConfirmActionConfirm
	}
	if req.RequestedAt.IsZero() {
		req.RequestedAt = now
	}
	if req.UserID <= 0 {
		return nil, newApplyError(ErrorCodeInvalidRequest, "user_id 必须由接入层填入", nil)
	}
	if strings.TrimSpace(req.CandidateID) == "" {
		return nil, newApplyError(ErrorCodeInvalidRequest, "candidate_id 不能为空", nil)
	}
	if req.Action != ConfirmActionConfirm {
		return nil, newApplyError(ErrorCodeInvalidRequest, "当前只支持 confirm 动作", nil)
	}
	if err := ValidatePreviewConfirmable(preview, req.UserID, now); err != nil {
		return nil, err
	}

	// Idempotency: hash the request and reject a reused key bound to a
	// different confirm payload.
	requestHash, err := BuildConfirmRequestHash(preview.ID, req)
	if err != nil {
		return nil, err
	}
	if DetectIdempotencyConflict(preview, requestHash.ApplyHash, req.IdempotencyKey) {
		return nil, newApplyError(ErrorCodeIdempotencyConflict, "同一个幂等键已绑定不同确认内容", nil)
	}

	candidate, err := FindCandidateInPreview(preview, req.CandidateID)
	if err != nil {
		return nil, err
	}

	originalChanges, err := NormalizeChanges(candidate.Changes, candidate.CandidateType)
	if err != nil {
		return nil, err
	}

	// User edits may replace the candidate's changes, but only within the
	// candidate's original target scope.
	changes := originalChanges
	if len(req.EditedChanges) > 0 {
		editedChanges, normalizeErr := NormalizeChanges(req.EditedChanges, candidate.CandidateType)
		if normalizeErr != nil {
			return nil, normalizeErr
		}
		if validateErr := ValidateChangeScope(originalChanges, editedChanges); validateErr != nil {
			return nil, validateErr
		}
		changes = editedChanges
	}

	normalizedHash, err := BuildNormalizedChangesHash(changes)
	if err != nil {
		return nil, err
	}
	commands, skipped, err := ConvertChangesToCommands(changes)
	if err != nil {
		return nil, err
	}

	return &ApplyActiveScheduleRequest{
		PreviewID:             preview.ID,
		ApplyID:               requestHash.ApplyID,
		IdempotencyKey:        strings.TrimSpace(req.IdempotencyKey),
		RequestHash:           requestHash.ApplyHash,
		RequestBodyHash:       requestHash.BodyHash,
		UserID:                req.UserID,
		CandidateID:           req.CandidateID,
		BaseVersion:           preview.BaseVersion,
		Changes:               changes,
		Commands:              commands,
		SkippedChanges:        skipped,
		NormalizedChangesHash: normalizedHash,
		RequestedAt:           req.RequestedAt,
		TraceID:               req.TraceID,
	}, nil
}
|
||||
|
||||
// FindCandidateInPreview locates the candidate named by confirm inside
// selected_candidate_json or candidates_json.
//
// Responsibility boundaries:
// 1. Prefers selected_candidate_json, falling back to candidates_json only
//    when the selected candidate's id does not match;
// 2. Tolerates both the default Go-struct JSON field names and the
//    snake_case names commonly used by the frontend;
// 3. Returns only the candidate snapshot; it does not judge whether the
//    candidate can still be persisted.
func FindCandidateInPreview(preview model.ActiveSchedulePreview, candidateID string) (candidateSnapshot, error) {
	candidateID = strings.TrimSpace(candidateID)
	if candidateID == "" {
		return candidateSnapshot{}, newApplyError(ErrorCodeInvalidRequest, "candidate_id 不能为空", nil)
	}

	// Fast path: the preview already recorded the selected candidate.
	if preview.SelectedCandidateJSON != nil && strings.TrimSpace(*preview.SelectedCandidateJSON) != "" {
		selected, err := parseCandidateSnapshot([]byte(*preview.SelectedCandidateJSON))
		if err != nil {
			return candidateSnapshot{}, err
		}
		if selected.CandidateID == candidateID {
			return selected, nil
		}
	}

	// Fallback: linear scan over the full candidate list.
	candidates, err := parseCandidateList(preview.CandidatesJSON)
	if err != nil {
		return candidateSnapshot{}, err
	}
	for _, item := range candidates {
		if item.CandidateID == candidateID {
			return item, nil
		}
	}
	return candidateSnapshot{}, newApplyError(ErrorCodeTargetNotFound, "confirm 指定的 candidate_id 不属于当前 preview", nil)
}
|
||||
|
||||
// NormalizeChanges applies minimal normalization to candidate or user-edited
// changes.
//
// Responsibility boundaries:
// 1. Fills defaulted fields such as change_type, target, duration and slots;
// 2. Merges contiguous sections of the same target to reduce later port
//    write complexity;
// 3. Performs no database fact validation and no conflict detection.
func NormalizeChanges(changes []ApplyChange, candidateType ChangeType) ([]ApplyChange, error) {
	// No-op candidate types (ask_user/notify_only/close) may legally carry an
	// empty change list; synthesize a single typed change for them.
	if len(changes) == 0 && isNoopChangeType(candidateType) {
		changes = []ApplyChange{{Type: candidateType}}
	}
	if len(changes) == 0 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "候选没有可转换的 changes", nil)
	}

	normalized := make([]ApplyChange, 0, len(changes))
	for _, change := range changes {
		item, err := normalizeChange(change, candidateType)
		if err != nil {
			return nil, err
		}
		normalized = append(normalized, item)
	}

	// Stable sort so merging and hashing see a deterministic order.
	sort.SliceStable(normalized, func(i, j int) bool {
		return changeSortKey(normalized[i]) < changeSortKey(normalized[j])
	})
	merged := mergeContinuousChanges(normalized)
	// Stamp each merged change with a content hash for idempotency checks.
	for i := range merged {
		hash, err := hashJSON(changeForHash(merged[i]))
		if err != nil {
			return nil, err
		}
		merged[i].NormalizedHash = hash
	}
	return merged, nil
}
|
||||
|
||||
// ValidateChangeScope verifies that edited_changes introduces no targets
// outside the original candidate.
//
// Responsibility boundaries:
// 1. Only compares the candidate's original changes with the user-edited
//    ones;
// 2. Changes with EditedAllowed=true may move their time coordinates, but
//    may not change target/type;
// 3. Finer conflict, course-overlap and base_version re-validation remain
//    with the apply port.
func ValidateChangeScope(original []ApplyChange, edited []ApplyChange) error {
	// Index the candidate's changes by (target, type) scope key.
	allowed := make(map[string]ApplyChange, len(original))
	for _, change := range original {
		allowed[changeScopeKey(change)] = change
	}
	seen := make(map[string]struct{}, len(edited))
	for _, change := range edited {
		key := changeScopeKey(change)
		base, ok := allowed[key]
		if !ok {
			return newApplyError(ErrorCodeInvalidEditedChanges, "edited_changes 包含候选外目标或变更类型", nil)
		}
		// Each scope key may appear at most once in the edited set.
		if _, exists := seen[key]; exists {
			return newApplyError(ErrorCodeInvalidEditedChanges, "edited_changes 存在重复目标", nil)
		}
		seen[key] = struct{}{}
		// Non-editable changes must match the original exactly.
		if !base.EditedAllowed && !sameChangeForScope(base, change) {
			return newApplyError(ErrorCodeInvalidEditedChanges, "该 change 不允许用户编辑", nil)
		}
	}
	return nil
}
|
||||
|
||||
// ConvertChangesToCommands turns normalized changes into formal write
// commands.
//
// Responsibility boundaries:
// 1. The MVP only emits task-pool insert and makeup-block insert commands;
// 2. ask_user/notify_only/close only produce skipped_changes and never write
//    the official schedule;
// 3. compress_with_next_dynamic_task is rejected outright, so confirm can
//    never yield an unappliable candidate.
func ConvertChangesToCommands(changes []ApplyChange) ([]ApplyCommand, []SkippedChange, error) {
	commands := make([]ApplyCommand, 0, len(changes))
	skipped := make([]SkippedChange, 0)
	for _, change := range changes {
		switch change.Type {
		case ChangeTypeAddTaskPoolToSchedule:
			if change.TargetID <= 0 {
				return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "task_pool change 缺少 task_id/target_id", nil)
			}
			commands = append(commands, ApplyCommand{
				CommandType: CommandTypeInsertTaskPoolEvent,
				ChangeID:    change.ChangeID,
				ChangeType:  change.Type,
				TargetType:  firstNonEmpty(change.TargetType, "task_pool"),
				TargetID:    change.TargetID,
				Slots:       slotsFromChange(change),
				Metadata:    cloneMetadata(change.Metadata),
			})
		case ChangeTypeCreateMakeup:
			// The source event may arrive under several aliases; take the
			// first positive one.
			sourceEventID := firstPositive(change.SourceEventID, change.MakeupForEventID, change.EventID, change.TargetID)
			if sourceEventID <= 0 {
				return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "create_makeup 缺少原 schedule_event id", nil)
			}
			commands = append(commands, ApplyCommand{
				CommandType:   CommandTypeInsertMakeupEvent,
				ChangeID:      change.ChangeID,
				ChangeType:    change.Type,
				TargetType:    firstNonEmpty(change.TargetType, "schedule_event"),
				TargetID:      firstPositive(change.TargetID, sourceEventID),
				Slots:         slotsFromChange(change),
				SourceEventID: sourceEventID,
				Metadata:      cloneMetadata(change.Metadata),
			})
		case ChangeTypeAskUser, ChangeTypeNotifyOnly, ChangeTypeClose:
			// Interaction-only changes are recorded but write nothing.
			skipped = append(skipped, SkippedChange{
				ChangeID:   change.ChangeID,
				ChangeType: change.Type,
				Reason:     "该候选只更新交互状态或通知结果,不写正式日程",
			})
		case ChangeTypeCompressWithNextDynamicTask:
			return nil, nil, newApplyError(ErrorCodeUnsupportedChangeType, "MVP 明确关闭压缩融合 apply", nil)
		default:
			return nil, nil, newApplyError(ErrorCodeUnsupportedChangeType, fmt.Sprintf("不支持的 change_type: %s", change.Type), nil)
		}
	}
	return commands, skipped, nil
}
|
||||
455
backend/services/active_scheduler/core/apply/convert_helpers.go
Normal file
455
backend/services/active_scheduler/core/apply/convert_helpers.go
Normal file
@@ -0,0 +1,455 @@
|
||||
package apply
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// normalizeChange canonicalizes one change: resolve its type, fill target and
// slot defaults, then validate the slot coordinates.
//
// The fill/validate steps are order-sensitive: target ids must be resolved
// before slots, and validation runs on the fully-filled change.
func normalizeChange(change ApplyChange, candidateType ChangeType) (ApplyChange, error) {
	change.Type = normalizeChangeType(change.Type, candidateType)
	// Compression is rejected here as well as during command conversion, so
	// an edited change can never smuggle one through.
	if change.Type == ChangeTypeCompressWithNextDynamicTask {
		return ApplyChange{}, newApplyError(ErrorCodeUnsupportedChangeType, "MVP 明确关闭压缩融合 apply", nil)
	}
	// ask_user/notify_only/close carry no coordinates; pass them through.
	if isNoopChangeType(change.Type) {
		return change, nil
	}

	fillTargetFields(&change)
	fillSlotFields(&change)
	if err := validateSlotFields(change); err != nil {
		return ApplyChange{}, err
	}
	return change, nil
}
|
||||
|
||||
func normalizeChangeType(changeType ChangeType, candidateType ChangeType) ChangeType {
|
||||
if changeType == "" || changeType == rawChangeTypeNone {
|
||||
if isNoopChangeType(candidateType) {
|
||||
return candidateType
|
||||
}
|
||||
return candidateType
|
||||
}
|
||||
if changeType == rawChangeTypeAdd && candidateType == ChangeTypeAddTaskPoolToSchedule {
|
||||
return ChangeTypeAddTaskPoolToSchedule
|
||||
}
|
||||
if changeType == rawChangeTypeAdd && candidateType == ChangeTypeCreateMakeup {
|
||||
return ChangeTypeCreateMakeup
|
||||
}
|
||||
return changeType
|
||||
}
|
||||
|
||||
// fillTargetFields back-fills target/task/event id aliases so that, after
// normalization, every alias field agrees on the same id.
func fillTargetFields(change *ApplyChange) {
	switch change.Type {
	case ChangeTypeAddTaskPoolToSchedule:
		// task_pool changes may name the task via either target_id or
		// task_id; propagate whichever is present into both fields.
		change.TargetType = firstNonEmpty(change.TargetType, "task_pool")
		change.TargetID = firstPositive(change.TargetID, change.TaskID)
		change.TaskID = firstPositive(change.TaskID, change.TargetID)
	case ChangeTypeCreateMakeup:
		// The source event may arrive under several aliases; resolve one id
		// first, then mirror it into every alias field.
		sourceEventID := firstPositive(change.SourceEventID, change.MakeupForEventID, change.EventID, change.TargetID)
		change.TargetType = firstNonEmpty(change.TargetType, "schedule_event")
		change.TargetID = firstPositive(change.TargetID, sourceEventID)
		change.EventID = firstPositive(change.EventID, sourceEventID)
		change.SourceEventID = sourceEventID
		change.MakeupForEventID = firstPositive(change.MakeupForEventID, sourceEventID)
	}
}
|
||||
|
||||
// fillSlotFields reconciles the three slot representations on a change —
// explicit Slots, the (Week, DayOfWeek, SectionFrom, SectionTo) span and
// DurationSections — filling whichever are missing from whichever exist.
//
// The steps below are order-dependent: slots seed the span, the span seeds
// the duration, the duration can complete the span, and a complete span can
// finally rebuild the slots.
func fillSlotFields(change *ApplyChange) {
	if len(change.Slots) > 0 {
		// Sort so first/last give the span boundaries.
		sort.SliceStable(change.Slots, func(i, j int) bool {
			return slotSortKey(change.Slots[i]) < slotSortKey(change.Slots[j])
		})
		first := change.Slots[0]
		last := change.Slots[len(change.Slots)-1]
		change.Week = firstPositive(change.Week, first.Week)
		change.DayOfWeek = firstPositive(change.DayOfWeek, first.DayOfWeek)
		change.SectionFrom = firstPositive(change.SectionFrom, first.Section)
		change.SectionTo = firstPositive(change.SectionTo, last.Section)
	}
	// Derive duration from the span, defaulting to a single section.
	if change.DurationSections <= 0 && change.SectionFrom > 0 && change.SectionTo >= change.SectionFrom {
		change.DurationSections = change.SectionTo - change.SectionFrom + 1
	}
	if change.DurationSections <= 0 {
		change.DurationSections = 1
	}
	// Complete a span that only specified its start.
	if change.SectionTo <= 0 && change.SectionFrom > 0 {
		change.SectionTo = change.SectionFrom + change.DurationSections - 1
	}
	// With a full span available, materialize the explicit slot list.
	if len(change.Slots) == 0 && change.Week > 0 && change.DayOfWeek > 0 && change.SectionFrom > 0 && change.SectionTo >= change.SectionFrom {
		change.Slots = buildSlots(change.Week, change.DayOfWeek, change.SectionFrom, change.SectionTo)
	}
}
|
||||
|
||||
func validateSlotFields(change ApplyChange) error {
|
||||
if change.TargetID <= 0 {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "change 缺少合法 target_id", nil)
|
||||
}
|
||||
if change.Week <= 0 || change.DayOfWeek <= 0 || change.SectionFrom <= 0 || change.SectionTo <= 0 {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "change 缺少合法节次坐标", nil)
|
||||
}
|
||||
if change.DayOfWeek < 1 || change.DayOfWeek > 7 {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "day_of_week 必须在 1 到 7 之间", nil)
|
||||
}
|
||||
if change.SectionFrom < 1 || change.SectionFrom > 12 || change.SectionTo < 1 || change.SectionTo > 12 || change.SectionTo < change.SectionFrom {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "section_from/section_to 必须是合法连续节次", nil)
|
||||
}
|
||||
if change.DurationSections != change.SectionTo-change.SectionFrom+1 {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "duration_sections 与节次数量不一致", nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseCandidateList decodes candidates_json into candidate snapshots.
//
// The payload may be: a JSON array of candidates, an object with a nested
// "candidates"/"Candidates" array (handled recursively), or a single
// candidate object. nil/blank input yields (nil, nil).
func parseCandidateList(raw *string) ([]candidateSnapshot, error) {
	if raw == nil || strings.TrimSpace(*raw) == "" {
		return nil, nil
	}
	// First pass only determines the top-level JSON shape.
	var first any
	if err := json.Unmarshal([]byte(*raw), &first); err != nil {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "candidates_json 不是合法 JSON", err)
	}
	switch typed := first.(type) {
	case []any:
		// Array shape: re-decode as raw messages so each element keeps its
		// original bytes for snapshot parsing.
		result := make([]candidateSnapshot, 0, len(typed))
		var raws []json.RawMessage
		if err := json.Unmarshal([]byte(*raw), &raws); err != nil {
			return nil, newApplyError(ErrorCodeInvalidEditedChanges, "candidates_json 候选数组解析失败", err)
		}
		for _, item := range raws {
			candidate, err := parseCandidateSnapshot(item)
			if err != nil {
				return nil, err
			}
			result = append(result, candidate)
		}
		return result, nil
	case map[string]any:
		obj := make(map[string]json.RawMessage)
		if err := json.Unmarshal([]byte(*raw), &obj); err != nil {
			return nil, newApplyError(ErrorCodeInvalidEditedChanges, "candidates_json 候选对象解析失败", err)
		}
		// Wrapper object: recurse into the nested candidates array.
		if nested := rawField(obj, "candidates", "Candidates"); len(nested) > 0 {
			nestedText := string(nested)
			return parseCandidateList(&nestedText)
		}
		// Otherwise treat the object as a single candidate.
		candidate, err := parseCandidateSnapshot([]byte(*raw))
		if err != nil {
			return nil, err
		}
		return []candidateSnapshot{candidate}, nil
	default:
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "candidates_json 结构不受支持", nil)
	}
}
|
||||
|
||||
// parseCandidateSnapshot decodes one candidate object, tolerating both
// snake_case and Go-struct JSON field names. candidate_id and candidate_type
// are mandatory.
func parseCandidateSnapshot(raw []byte) (candidateSnapshot, error) {
	obj := make(map[string]json.RawMessage)
	if err := json.Unmarshal(raw, &obj); err != nil {
		return candidateSnapshot{}, newApplyError(ErrorCodeInvalidEditedChanges, "candidate JSON 解析失败", err)
	}
	candidate := candidateSnapshot{
		CandidateID:   stringField(obj, "candidate_id", "CandidateID", "id", "ID"),
		CandidateType: ChangeType(stringField(obj, "candidate_type", "CandidateType", "type", "change_type")),
	}
	if candidate.CandidateID == "" {
		return candidateSnapshot{}, newApplyError(ErrorCodeInvalidEditedChanges, "candidate 缺少 candidate_id", nil)
	}
	if candidate.CandidateType == "" {
		return candidateSnapshot{}, newApplyError(ErrorCodeInvalidEditedChanges, "candidate 缺少 candidate_type", nil)
	}

	// Changes may live under several aliases; decode each one with the
	// candidate type as the fallback change type.
	changeRaws := rawArrayField(obj, "changes", "Changes", "preview_changes", "PreviewChanges")
	candidate.Changes = make([]ApplyChange, 0, len(changeRaws))
	for _, rawChange := range changeRaws {
		change, err := parseApplyChange(rawChange, candidate.CandidateType)
		if err != nil {
			return candidateSnapshot{}, err
		}
		candidate.Changes = append(candidate.Changes, change)
	}
	// No-op candidates may legally carry no changes; synthesize one typed
	// change so downstream normalization has something to work with.
	if len(candidate.Changes) == 0 && isNoopChangeType(candidate.CandidateType) {
		candidate.Changes = []ApplyChange{{Type: candidate.CandidateType}}
	}
	return candidate, nil
}
|
||||
|
||||
// parseApplyChange decodes one change object field-by-field, accepting both
// snake_case and Go-struct JSON names, with the candidate type as fallback
// for an untyped change.
func parseApplyChange(raw []byte, candidateType ChangeType) (ApplyChange, error) {
	obj := make(map[string]json.RawMessage)
	if err := json.Unmarshal(raw, &obj); err != nil {
		return ApplyChange{}, newApplyError(ErrorCodeInvalidEditedChanges, "change JSON 解析失败", err)
	}
	change := ApplyChange{
		ChangeID:         stringField(obj, "change_id", "ChangeID", "id", "ID"),
		Type:             ChangeType(stringField(obj, "type", "change_type", "ChangeType")),
		TargetType:       stringField(obj, "target_type", "TargetType"),
		TargetID:         intField(obj, "target_id", "TargetID"),
		TaskID:           intField(obj, "task_id", "TaskID"),
		EventID:          intField(obj, "event_id", "EventID"),
		Week:             intField(obj, "week", "Week"),
		DayOfWeek:        intField(obj, "day_of_week", "DayOfWeek"),
		SectionFrom:      intField(obj, "section_from", "SectionFrom"),
		SectionTo:        intField(obj, "section_to", "SectionTo"),
		DurationSections: intField(obj, "duration_sections", "DurationSections"),
		MakeupForEventID: intField(obj, "makeup_for_event_id", "MakeupForEventID"),
		SourceEventID:    intField(obj, "source_event_id", "SourceEventID"),
		EditedAllowed:    boolField(obj, "edited_allowed", "EditedAllowed"),
		Metadata:         mapStringField(obj, "metadata", "Metadata"),
	}
	if change.Type == "" {
		change.Type = candidateType
	}
	// Makeup ids may also be tucked into metadata; use them as fallbacks.
	if len(change.Metadata) > 0 {
		change.MakeupForEventID = firstPositive(change.MakeupForEventID, parsePositiveInt(change.Metadata["makeup_for_event_id"]))
		change.SourceEventID = firstPositive(change.SourceEventID, parsePositiveInt(change.Metadata["source_event_id"]))
	}
	change.Slots = slotsFromRawChange(obj)
	return change, nil
}
|
||||
|
||||
func slotsFromRawChange(obj map[string]json.RawMessage) []Slot {
|
||||
if raw := rawField(obj, "slots", "Slots"); len(raw) > 0 {
|
||||
var slots []Slot
|
||||
if err := json.Unmarshal(raw, &slots); err == nil && len(slots) > 0 {
|
||||
return slots
|
||||
}
|
||||
}
|
||||
if raw := rawField(obj, "to_slot", "ToSlot"); len(raw) > 0 {
|
||||
span, ok := parseSlotSpan(raw)
|
||||
if ok {
|
||||
return buildSlots(span.Start.Week, span.Start.DayOfWeek, span.Start.Section, span.End.Section)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSlotSpan decodes a to_slot-style object into a start/end span.
//
// A missing end is completed from the start plus duration_sections (when the
// duration is > 1); ok is false unless both endpoints end up non-zero.
func parseSlotSpan(raw []byte) (SlotSpan, bool) {
	obj := make(map[string]json.RawMessage)
	if err := json.Unmarshal(raw, &obj); err != nil {
		return SlotSpan{}, false
	}
	start := parseSlot(rawField(obj, "start", "Start"))
	end := parseSlot(rawField(obj, "end", "End"))
	duration := intField(obj, "duration_sections", "DurationSections")
	if end.IsZero() && !start.IsZero() {
		// End defaults to the start slot, stretched by the duration.
		end = start
		if duration > 1 {
			end.Section = start.Section + duration - 1
		}
	}
	return SlotSpan{Start: start, End: end, DurationSections: firstPositive(duration, end.Section-start.Section+1)}, !start.IsZero() && !end.IsZero()
}
|
||||
|
||||
func parseSlot(raw []byte) Slot {
|
||||
if len(raw) == 0 {
|
||||
return Slot{}
|
||||
}
|
||||
obj := make(map[string]json.RawMessage)
|
||||
if err := json.Unmarshal(raw, &obj); err != nil {
|
||||
return Slot{}
|
||||
}
|
||||
return Slot{
|
||||
Week: intField(obj, "week", "Week"),
|
||||
DayOfWeek: intField(obj, "day_of_week", "DayOfWeek"),
|
||||
Section: intField(obj, "section", "Section"),
|
||||
}
|
||||
}
|
||||
|
||||
func rawField(obj map[string]json.RawMessage, keys ...string) json.RawMessage {
|
||||
for _, key := range keys {
|
||||
if raw, ok := obj[key]; ok && len(raw) > 0 && string(raw) != "null" {
|
||||
return raw
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func rawArrayField(obj map[string]json.RawMessage, keys ...string) []json.RawMessage {
|
||||
raw := rawField(obj, keys...)
|
||||
if len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
var items []json.RawMessage
|
||||
if err := json.Unmarshal(raw, &items); err != nil {
|
||||
return nil
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
func stringField(obj map[string]json.RawMessage, keys ...string) string {
|
||||
raw := rawField(obj, keys...)
|
||||
if len(raw) == 0 {
|
||||
return ""
|
||||
}
|
||||
var value string
|
||||
if err := json.Unmarshal(raw, &value); err == nil {
|
||||
return strings.TrimSpace(value)
|
||||
}
|
||||
return strings.Trim(strings.TrimSpace(string(raw)), `"`)
|
||||
}
|
||||
|
||||
func intField(obj map[string]json.RawMessage, keys ...string) int {
|
||||
raw := rawField(obj, keys...)
|
||||
if len(raw) == 0 {
|
||||
return 0
|
||||
}
|
||||
var value int
|
||||
if err := json.Unmarshal(raw, &value); err == nil {
|
||||
return value
|
||||
}
|
||||
var asString string
|
||||
if err := json.Unmarshal(raw, &asString); err == nil {
|
||||
return parsePositiveInt(asString)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func boolField(obj map[string]json.RawMessage, keys ...string) bool {
|
||||
raw := rawField(obj, keys...)
|
||||
if len(raw) == 0 {
|
||||
return false
|
||||
}
|
||||
var value bool
|
||||
if err := json.Unmarshal(raw, &value); err == nil {
|
||||
return value
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func mapStringField(obj map[string]json.RawMessage, keys ...string) map[string]string {
|
||||
raw := rawField(obj, keys...)
|
||||
if len(raw) == 0 {
|
||||
return nil
|
||||
}
|
||||
var result map[string]string
|
||||
if err := json.Unmarshal(raw, &result); err == nil {
|
||||
return result
|
||||
}
|
||||
var loose map[string]any
|
||||
if err := json.Unmarshal(raw, &loose); err != nil {
|
||||
return nil
|
||||
}
|
||||
result = make(map[string]string, len(loose))
|
||||
for key, value := range loose {
|
||||
result[key] = fmt.Sprint(value)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func buildSlots(week int, dayOfWeek int, sectionFrom int, sectionTo int) []Slot {
|
||||
if week <= 0 || dayOfWeek <= 0 || sectionFrom <= 0 || sectionTo < sectionFrom {
|
||||
return nil
|
||||
}
|
||||
slots := make([]Slot, 0, sectionTo-sectionFrom+1)
|
||||
for section := sectionFrom; section <= sectionTo; section++ {
|
||||
slots = append(slots, Slot{Week: week, DayOfWeek: dayOfWeek, Section: section})
|
||||
}
|
||||
return slots
|
||||
}
|
||||
|
||||
func slotsFromChange(change ApplyChange) []Slot {
|
||||
if len(change.Slots) > 0 {
|
||||
return append([]Slot(nil), change.Slots...)
|
||||
}
|
||||
return buildSlots(change.Week, change.DayOfWeek, change.SectionFrom, change.SectionTo)
|
||||
}
|
||||
|
||||
func mergeContinuousChanges(changes []ApplyChange) []ApplyChange {
|
||||
if len(changes) <= 1 {
|
||||
return changes
|
||||
}
|
||||
merged := make([]ApplyChange, 0, len(changes))
|
||||
for _, change := range changes {
|
||||
if len(merged) == 0 {
|
||||
merged = append(merged, change)
|
||||
continue
|
||||
}
|
||||
last := &merged[len(merged)-1]
|
||||
if canMergeChange(*last, change) {
|
||||
last.SectionTo = change.SectionTo
|
||||
last.DurationSections += change.DurationSections
|
||||
last.Slots = append(last.Slots, change.Slots...)
|
||||
continue
|
||||
}
|
||||
merged = append(merged, change)
|
||||
}
|
||||
return merged
|
||||
}
|
||||
|
||||
func canMergeChange(left ApplyChange, right ApplyChange) bool {
|
||||
return left.Type == right.Type &&
|
||||
left.TargetType == right.TargetType &&
|
||||
left.TargetID == right.TargetID &&
|
||||
left.SourceEventID == right.SourceEventID &&
|
||||
left.Week == right.Week &&
|
||||
left.DayOfWeek == right.DayOfWeek &&
|
||||
left.SectionTo+1 == right.SectionFrom &&
|
||||
left.EditedAllowed == right.EditedAllowed
|
||||
}
|
||||
|
||||
func isNoopChangeType(changeType ChangeType) bool {
|
||||
return changeType == ChangeTypeAskUser || changeType == ChangeTypeNotifyOnly || changeType == ChangeTypeClose
|
||||
}
|
||||
|
||||
func changeSortKey(change ApplyChange) string {
|
||||
return fmt.Sprintf("%s:%s:%010d:%010d:%010d:%010d:%010d",
|
||||
change.Type, change.TargetType, change.TargetID, change.SourceEventID, change.Week, change.DayOfWeek, change.SectionFrom)
|
||||
}
|
||||
|
||||
func slotSortKey(slot Slot) string {
|
||||
return fmt.Sprintf("%010d:%010d:%010d", slot.Week, slot.DayOfWeek, slot.Section)
|
||||
}
|
||||
|
||||
func changeScopeKey(change ApplyChange) string {
|
||||
return fmt.Sprintf("%s:%s:%d:%d", change.Type, change.TargetType, change.TargetID, change.SourceEventID)
|
||||
}
|
||||
|
||||
func sameChangeForScope(left ApplyChange, right ApplyChange) bool {
|
||||
return left.Type == right.Type &&
|
||||
left.TargetType == right.TargetType &&
|
||||
left.TargetID == right.TargetID &&
|
||||
left.SourceEventID == right.SourceEventID &&
|
||||
left.Week == right.Week &&
|
||||
left.DayOfWeek == right.DayOfWeek &&
|
||||
left.SectionFrom == right.SectionFrom &&
|
||||
left.SectionTo == right.SectionTo &&
|
||||
left.DurationSections == right.DurationSections
|
||||
}
|
||||
|
||||
func changeForHash(change ApplyChange) ApplyChange {
|
||||
change.NormalizedHash = ""
|
||||
return change
|
||||
}
|
||||
|
||||
// cloneMetadata returns an independent shallow copy of the metadata map.
// Empty or nil input yields nil so callers can rely on len checks.
func cloneMetadata(metadata map[string]string) map[string]string {
	if len(metadata) == 0 {
		return nil
	}
	out := make(map[string]string, len(metadata))
	for key, value := range metadata {
		out[key] = value
	}
	return out
}
|
||||
|
||||
// firstPositive returns the first strictly positive value, or 0 when none is.
func firstPositive(values ...int) int {
	for _, candidate := range values {
		if candidate > 0 {
			return candidate
		}
	}
	return 0
}
|
||||
|
||||
// parsePositiveInt parses a strictly positive integer from a string.
// Whitespace is trimmed; empty, malformed, zero, or negative input yields 0.
func parsePositiveInt(value string) int {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return 0
	}
	n, err := strconv.Atoi(trimmed)
	if err != nil || n <= 0 {
		return 0
	}
	return n
}
|
||||
99
backend/services/active_scheduler/core/apply/hash.go
Normal file
99
backend/services/active_scheduler/core/apply/hash.go
Normal file
@@ -0,0 +1,99 @@
|
||||
package apply
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// RequestHash bundles the idempotency digests derived from a single confirm request.
type RequestHash struct {
	BodyHash  string // digest covering only the apply-relevant body fields
	ApplyHash string // digest of preview_id + idempotency_key + BodyHash
	ApplyID   string // stable apply identifier derived from ApplyHash
}
||||
|
||||
// BuildConfirmRequestHash 计算 confirm 请求的幂等摘要。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. body_hash 只覆盖一次确认动作中真正影响 apply 的 body 字段;
|
||||
// 2. apply_hash 按 preview_id + idempotency_key + body_hash 计算,满足同 key 不同内容可识别;
|
||||
// 3. 不查询历史记录,是否冲突由 DetectIdempotencyConflict 或接入层唯一约束判断。
|
||||
func BuildConfirmRequestHash(previewID string, req ConfirmRequest) (RequestHash, error) {
|
||||
previewID = strings.TrimSpace(firstNonEmpty(req.PreviewID, previewID))
|
||||
idempotencyKey := strings.TrimSpace(req.IdempotencyKey)
|
||||
if previewID == "" {
|
||||
return RequestHash{}, newApplyError(ErrorCodeInvalidRequest, "preview_id 不能为空", nil)
|
||||
}
|
||||
if idempotencyKey == "" {
|
||||
return RequestHash{}, newApplyError(ErrorCodeInvalidRequest, "idempotency_key 不能为空", nil)
|
||||
}
|
||||
|
||||
bodyHash, err := hashJSON(confirmRequestBodyForHash{
|
||||
CandidateID: strings.TrimSpace(req.CandidateID),
|
||||
Action: normalizeConfirmAction(req.Action),
|
||||
EditedChanges: req.EditedChanges,
|
||||
IdempotencyKey: idempotencyKey,
|
||||
})
|
||||
if err != nil {
|
||||
return RequestHash{}, err
|
||||
}
|
||||
|
||||
applyHash := sha256Text(previewID + "\n" + idempotencyKey + "\n" + bodyHash)
|
||||
applyID := "asap_" + applyHash[:24]
|
||||
return RequestHash{
|
||||
BodyHash: bodyHash,
|
||||
ApplyHash: applyHash,
|
||||
ApplyID: applyID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// BuildNormalizedChangesHash 计算转换后 changes 的稳定摘要。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只用于审计和幂等辅助,不替代正式 DB 重校验;
|
||||
// 2. 输入应是 NormalizeChanges 后的结果,避免相同语义因顺序不同得到不同摘要;
|
||||
// 3. 序列化失败会返回 invalid_request,调用方应拒绝本次 confirm。
|
||||
func BuildNormalizedChangesHash(changes []ApplyChange) (string, error) {
|
||||
return hashJSON(changes)
|
||||
}
|
||||
|
||||
// confirmRequestBodyForHash is the canonical projection of a confirm request
// used for body_hash computation; only apply-relevant fields are included.
type confirmRequestBodyForHash struct {
	CandidateID    string        `json:"candidate_id"`
	Action         ConfirmAction `json:"action"`
	EditedChanges  []ApplyChange `json:"edited_changes,omitempty"`
	IdempotencyKey string        `json:"idempotency_key"`
}
|
||||
|
||||
func hashJSON(value any) (string, error) {
|
||||
raw, err := json.Marshal(value)
|
||||
if err != nil {
|
||||
return "", newApplyError(ErrorCodeInvalidRequest, "请求体无法生成稳定摘要", err)
|
||||
}
|
||||
return sha256Bytes(raw), nil
|
||||
}
|
||||
|
||||
// sha256Text returns the SHA-256 hex digest of a string.
func sha256Text(text string) string {
	return sha256Bytes([]byte(text))
}

// sha256Bytes returns the SHA-256 hex digest of a byte slice.
func sha256Bytes(raw []byte) string {
	digest := sha256.Sum256(raw)
	return hex.EncodeToString(digest[:])
}
|
||||
|
||||
func normalizeConfirmAction(action ConfirmAction) ConfirmAction {
|
||||
if action == "" {
|
||||
return ConfirmActionConfirm
|
||||
}
|
||||
return action
|
||||
}
|
||||
|
||||
// firstNonEmpty returns the first value whose trimmed form is non-empty.
// Note the original (untrimmed) value is returned, not the trimmed one.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if strings.TrimSpace(candidate) == "" {
			continue
		}
		return candidate
	}
	return ""
}
|
||||
276
backend/services/active_scheduler/core/apply/types.go
Normal file
276
backend/services/active_scheduler/core/apply/types.go
Normal file
@@ -0,0 +1,276 @@
|
||||
package apply
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
// ConfirmAction enumerates the user actions accepted by the confirm endpoint.
type ConfirmAction string

const (
	ConfirmActionConfirm ConfirmAction = "confirm"
)

// ChangeType enumerates the structured change kinds a candidate may carry.
type ChangeType string

const (
	ChangeTypeAddTaskPoolToSchedule       ChangeType = "add_task_pool_to_schedule"
	ChangeTypeCreateMakeup                ChangeType = "create_makeup"
	ChangeTypeAskUser                     ChangeType = "ask_user"
	ChangeTypeNotifyOnly                  ChangeType = "notify_only"
	ChangeTypeClose                       ChangeType = "close"
	ChangeTypeCompressWithNextDynamicTask ChangeType = "compress_with_next_dynamic_task"
)

// CommandType enumerates the concrete write commands produced from changes.
type CommandType string

const (
	CommandTypeInsertTaskPoolEvent CommandType = "insert_task_pool_event"
	CommandTypeInsertMakeupEvent   CommandType = "insert_makeup_event"
)

// ApplyStatus tracks the lifecycle of an apply attempt for a preview.
type ApplyStatus string

const (
	ApplyStatusNone     ApplyStatus = "none"
	ApplyStatusApplying ApplyStatus = "applying"
	ApplyStatusApplied  ApplyStatus = "applied"
	ApplyStatusFailed   ApplyStatus = "failed"
	ApplyStatusRejected ApplyStatus = "rejected"
	ApplyStatusExpired  ApplyStatus = "expired"
)

// ErrorCode classifies confirm/apply business errors for API-layer mapping.
type ErrorCode string

const (
	ErrorCodeExpired               ErrorCode = "expired"
	ErrorCodeIdempotencyConflict   ErrorCode = "idempotency_conflict"
	ErrorCodeBaseVersionChanged    ErrorCode = "base_version_changed"
	ErrorCodeTargetNotFound        ErrorCode = "target_not_found"
	ErrorCodeTargetCompleted       ErrorCode = "target_completed"
	ErrorCodeTargetAlreadySchedule ErrorCode = "target_already_scheduled"
	ErrorCodeSlotConflict          ErrorCode = "slot_conflict"
	ErrorCodeInvalidEditedChanges  ErrorCode = "invalid_edited_changes"
	ErrorCodeUnsupportedChangeType ErrorCode = "unsupported_change_type"
	ErrorCodeDBError               ErrorCode = "db_error"
	ErrorCodeInvalidRequest        ErrorCode = "invalid_request"
	ErrorCodeForbidden             ErrorCode = "forbidden"
	ErrorCodeAlreadyApplied        ErrorCode = "already_applied"
)
|
||||
|
||||
// ApplyError is a business error in the confirm/apply chain that the API can
// map directly.
//
// Responsibility boundaries:
//  1. carries only the error classification and a readable message, so the
//     main flow can persist apply_error or convert it to an HTTP response;
//  2. does not decide preview state transitions — those stay with the access
//     layer or the preview repository;
//  3. Err keeps the underlying cause; Error() renders the message for logs.
type ApplyError struct {
	Code    ErrorCode
	Message string
	Err     error
}
|
||||
|
||||
func (e *ApplyError) Error() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Message != "" {
|
||||
return fmt.Sprintf("%s: %s", e.Code, e.Message)
|
||||
}
|
||||
return string(e.Code)
|
||||
}
|
||||
|
||||
func (e *ApplyError) Unwrap() error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// newApplyError is the package-internal constructor for *ApplyError values.
func newApplyError(code ErrorCode, message string, err error) error {
	return &ApplyError{Code: code, Message: message, Err: err}
}
|
||||
|
||||
// NewApplyError constructs a classifiable business error for the confirm/apply
// chain.
//
// Responsibility boundaries:
//  1. lets service/API layers convert preview-ownership, idempotency-conflict,
//     and adapter rejections into the shared error semantics;
//  2. does not write preview state and does not choose HTTP status codes;
//  3. cause only preserves the underlying error — user-facing text belongs in
//     message.
func NewApplyError(code ErrorCode, message string, cause error) error {
	return newApplyError(code, message, cause)
}
|
||||
|
||||
// AsApplyError 尝试把 error 还原为 ApplyError。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做 errors.As 类型判断,方便 API 层统一映射业务错误;
|
||||
// 2. 不把未知错误强行归类,避免数据库或系统故障被误判为 4xx;
|
||||
// 3. 返回 bool=false 时,调用方应按普通系统错误处理。
|
||||
func AsApplyError(err error) (*ApplyError, bool) {
|
||||
if err == nil {
|
||||
return nil, false
|
||||
}
|
||||
var applyErr *ApplyError
|
||||
if errors.As(err, &applyErr) {
|
||||
return applyErr, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func errorCodeOf(err error) ErrorCode {
|
||||
if err == nil {
|
||||
return ""
|
||||
}
|
||||
var applyErr *ApplyError
|
||||
if errors.As(err, &applyErr) {
|
||||
return applyErr.Code
|
||||
}
|
||||
return ErrorCodeDBError
|
||||
}
|
||||
|
||||
// Slot is the minimal section coordinate shared between confirm requests and
// apply commands.
//
// Responsibility boundaries:
//  1. only expresses week/day_of_week/section; it is not bound to the
//     schedules table;
//  2. relative-to-absolute time conversion is done by the apply port/adapter,
//     not here;
//  3. IsZero detects coordinates the frontend did not send or fields missing
//     from the candidate JSON.
type Slot struct {
	Week      int `json:"week"`
	DayOfWeek int `json:"day_of_week"`
	Section   int `json:"section"`
}

// IsZero reports whether all three coordinates are unset.
func (s Slot) IsZero() bool {
	return s.Week == 0 && s.DayOfWeek == 0 && s.Section == 0
}

// SlotSpan represents one continuous section range, expanded by the converter
// into concrete write commands.
type SlotSpan struct {
	Start            Slot `json:"start"`
	End              Slot `json:"end"`
	DurationSections int  `json:"duration_sections"`
}
|
||||
|
||||
// ApplyChange is the unified change DTO for confirm requests and converted
// candidates.
//
// Responsibility boundaries:
//  1. expresses the structured change the user finally confirmed, whether it
//     came from the preview's original candidate or from edited_changes;
//  2. is not a database model and does not imply the change was persisted;
//  3. Type decides convertibility into a write command — ask_user /
//     notify_only / close are retained as skipped items.
type ApplyChange struct {
	ChangeID         string            `json:"change_id,omitempty"`
	Type             ChangeType        `json:"type"`
	TargetType       string            `json:"target_type,omitempty"`
	TargetID         int               `json:"target_id,omitempty"`
	TaskID           int               `json:"task_id,omitempty"`
	EventID          int               `json:"event_id,omitempty"`
	Week             int               `json:"week,omitempty"`
	DayOfWeek        int               `json:"day_of_week,omitempty"`
	SectionFrom      int               `json:"section_from,omitempty"`
	SectionTo        int               `json:"section_to,omitempty"`
	DurationSections int               `json:"duration_sections,omitempty"`
	MakeupForEventID int               `json:"makeup_for_event_id,omitempty"`
	SourceEventID    int               `json:"source_event_id,omitempty"`
	Slots            []Slot            `json:"slots,omitempty"`
	EditedAllowed    bool              `json:"edited_allowed,omitempty"`
	Metadata         map[string]string `json:"metadata,omitempty"`
	NormalizedHash   string            `json:"normalized_hash,omitempty"`
}
|
||||
|
||||
// ConfirmRequest is the entry DTO submitted from the active-schedule detail
// page.
//
// Responsibility boundaries:
//  1. PreviewID may be filled in by the routing layer, so bodies without a
//     preview_id can still be converted;
//  2. when EditedChanges is empty, the converter falls back to the candidate's
//     original changes stored in the preview;
//  3. IdempotencyKey identifies one confirmation action only — not the
//     candidate itself.
type ConfirmRequest struct {
	PreviewID      string        `json:"preview_id,omitempty"`
	UserID         int           `json:"user_id,omitempty"`
	CandidateID    string        `json:"candidate_id"`
	Action         ConfirmAction `json:"action"`
	EditedChanges  []ApplyChange `json:"edited_changes,omitempty"`
	IdempotencyKey string        `json:"idempotency_key"`
	RequestedAt    time.Time     `json:"requested_at,omitempty"`
	TraceID        string        `json:"trace_id,omitempty"`
}
|
||||
|
||||
// ApplyCommand is a concrete, executable write instruction produced from a
// confirmed ApplyChange.
type ApplyCommand struct {
	CommandType   CommandType       `json:"command_type"`
	ChangeID      string            `json:"change_id,omitempty"`
	ChangeType    ChangeType        `json:"change_type"`
	TargetType    string            `json:"target_type"`
	TargetID      int               `json:"target_id"`
	Slots         []Slot            `json:"slots"`
	SourceEventID int               `json:"source_event_id,omitempty"`
	Metadata      map[string]string `json:"metadata,omitempty"`
}

// SkippedChange records a change that was intentionally not converted into a
// write command, together with the reason.
type SkippedChange struct {
	ChangeID   string     `json:"change_id,omitempty"`
	ChangeType ChangeType `json:"change_type"`
	Reason     string     `json:"reason"`
}
|
||||
|
||||
// ApplyActiveScheduleRequest is the request DTO handed to the formal write
// port.
//
// Responsibility boundaries:
//  1. describes a write intent only after preview/candidate conversion and
//     basic validation have completed;
//  2. does not execute schedule writes itself — the real transaction is owned
//     by ScheduleApplyPort and its adapter;
//  3. RequestHash supports idempotency detection over
//     preview_id + idempotency_key + body_hash.
type ApplyActiveScheduleRequest struct {
	PreviewID             string          `json:"preview_id"`
	ApplyID               string          `json:"apply_id"`
	IdempotencyKey        string          `json:"idempotency_key"`
	RequestHash           string          `json:"request_hash"`
	RequestBodyHash       string          `json:"request_body_hash"`
	UserID                int             `json:"user_id"`
	CandidateID           string          `json:"candidate_id"`
	BaseVersion           string          `json:"base_version"`
	Changes               []ApplyChange   `json:"changes"`
	Commands              []ApplyCommand  `json:"commands"`
	SkippedChanges        []SkippedChange `json:"skipped_changes,omitempty"`
	NormalizedChangesHash string          `json:"normalized_changes_hash"`
	RequestedAt           time.Time       `json:"requested_at"`
	TraceID               string          `json:"trace_id,omitempty"`
}
|
||||
|
||||
// ApplyActiveScheduleResult reports what the write port actually persisted
// (or why it refused), including the real applied changes rather than the
// candidate's original content.
type ApplyActiveScheduleResult struct {
	ApplyID              string          `json:"apply_id"`
	ApplyStatus          ApplyStatus     `json:"apply_status"`
	AppliedEventIDs      []int           `json:"applied_event_ids,omitempty"`
	AppliedScheduleIDs   []int           `json:"applied_schedule_ids,omitempty"`
	AppliedChanges       []ApplyChange   `json:"applied_changes,omitempty"`
	SkippedChanges       []SkippedChange `json:"skipped_changes,omitempty"`
	WarningMessages      []string        `json:"warning_messages,omitempty"`
	ErrorCode            ErrorCode       `json:"error_code,omitempty"`
	ErrorMessage         string          `json:"error_message,omitempty"`
	RequestHash          string          `json:"request_hash,omitempty"`
	NormalizedChangeHash string          `json:"normalized_change_hash,omitempty"`
}

// ConfirmResult is the outcome of one confirm attempt, carrying both the
// derived apply request and, when execution happened, its result.
type ConfirmResult struct {
	PreviewID       string                      `json:"preview_id"`
	ApplyID         string                      `json:"apply_id"`
	ApplyStatus     ApplyStatus                 `json:"apply_status"`
	CandidateID     string                      `json:"candidate_id"`
	RequestHash     string                      `json:"request_hash,omitempty"`
	RequestBodyHash string                      `json:"request_body_hash,omitempty"`
	ApplyRequest    *ApplyActiveScheduleRequest `json:"apply_request,omitempty"`
	ApplyResult     *ApplyActiveScheduleResult  `json:"apply_result,omitempty"`
	SkippedChanges  []SkippedChange             `json:"skipped_changes,omitempty"`
	ErrorCode       ErrorCode                   `json:"error_code,omitempty"`
	ErrorMessage    string                      `json:"error_message,omitempty"`
}
|
||||
|
||||
// ScheduleApplyPort is the only formal write port the active-scheduler apply
// layer is allowed to call.
//
// Responsibility boundaries:
//  1. re-reads task/schedule/task_item truth inside a transaction and writes
//     the formal schedule;
//  2. returns the real applied_changes/applied_event_ids, never the
//     candidate's raw content;
//  3. the apply package never imports DAOs to write schedules directly,
//     keeping existing domain capabilities in the loop.
type ScheduleApplyPort interface {
	ApplyActiveScheduleChanges(ctx context.Context, req ApplyActiveScheduleRequest) (ApplyActiveScheduleResult, error)
}
|
||||
98
backend/services/active_scheduler/core/apply/validate.go
Normal file
98
backend/services/active_scheduler/core/apply/validate.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package apply
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
)
|
||||
|
||||
// IsPreviewExpired 判断 preview 是否已经超过确认有效期。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只比较 expires_at 与调用方传入的 now;
|
||||
// 2. 不读取数据库,也不更新 preview.status;
|
||||
// 3. now 为空时按“不能安全确认”处理,避免调用方误放过过期预览。
|
||||
func IsPreviewExpired(preview model.ActiveSchedulePreview, now time.Time) bool {
|
||||
if now.IsZero() || preview.ExpiresAt.IsZero() {
|
||||
return true
|
||||
}
|
||||
return !now.Before(preview.ExpiresAt)
|
||||
}
|
||||
|
||||
// IsPreviewOwnedByUser 判断 preview 是否归属当前用户。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做 user_id 等值判断;
|
||||
// 2. userID 非法时直接返回 false;
|
||||
// 3. 不判断用户是否仍存在,该事实应由 API 鉴权或接入层保证。
|
||||
func IsPreviewOwnedByUser(preview model.ActiveSchedulePreview, userID int) bool {
|
||||
return userID > 0 && preview.UserID == userID
|
||||
}
|
||||
|
||||
// IsPreviewAlreadyApplied 判断 preview 是否已经成功应用过。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 同时兼容 preview.status 与 apply_status 两个字段;
|
||||
// 2. 只识别“已成功应用”,不把 failed/rejected 视为成功;
|
||||
// 3. 返回 true 时主线程应避免再次写正式日程。
|
||||
func IsPreviewAlreadyApplied(preview model.ActiveSchedulePreview) bool {
|
||||
return preview.Status == model.ActiveSchedulePreviewStatusApplied ||
|
||||
preview.ApplyStatus == model.ActiveScheduleApplyStatusApplied
|
||||
}
|
||||
|
||||
// ValidatePreviewConfirmable performs the lightweight preview checks at the
// confirm entry point.
//
// Responsibility boundaries:
//  1. validates only ownership, expiry, status, and already-applied rules;
//  2. does not verify task/schedule truth or slot conflicts — the apply port
//     does the authoritative re-validation;
//  3. nil means candidate conversion may proceed; an ApplyError means this
//     confirm must be rejected.
//
// Check order matters: existence → ownership → expiry → already-applied →
// terminal statuses → status/apply-status whitelists.
func ValidatePreviewConfirmable(preview model.ActiveSchedulePreview, userID int, now time.Time) error {
	if preview.ID == "" {
		return newApplyError(ErrorCodeTargetNotFound, "预览不存在或未加载", nil)
	}
	if !IsPreviewOwnedByUser(preview, userID) {
		return newApplyError(ErrorCodeForbidden, "预览不属于当前用户", nil)
	}
	// Expiry is checked via the time window plus both status fields, since
	// either field may have been flipped to expired independently.
	if IsPreviewExpired(preview, now) || preview.Status == model.ActiveSchedulePreviewStatusExpired || preview.ApplyStatus == model.ActiveScheduleApplyStatusExpired {
		return newApplyError(ErrorCodeExpired, "预览已过期,请重新生成建议", nil)
	}
	if IsPreviewAlreadyApplied(preview) {
		return newApplyError(ErrorCodeAlreadyApplied, "该预览已经应用过,不能重复写入日程", nil)
	}
	if preview.Status == model.ActiveSchedulePreviewStatusIgnored {
		return newApplyError(ErrorCodeInvalidRequest, "该预览已被忽略,不能继续确认", nil)
	}
	if preview.Status == model.ActiveSchedulePreviewStatusFailed {
		return newApplyError(ErrorCodeInvalidRequest, "该预览生成失败,不能继续确认", nil)
	}
	// Only ready/pending (or unset) preview statuses are confirmable.
	if preview.Status != "" && preview.Status != model.ActiveSchedulePreviewStatusReady && preview.Status != model.ActiveSchedulePreviewStatusPending {
		return newApplyError(ErrorCodeInvalidRequest, "预览状态不允许确认", nil)
	}
	// Re-confirmation is allowed only from none/failed/rejected apply states.
	if preview.ApplyStatus != "" &&
		preview.ApplyStatus != model.ActiveScheduleApplyStatusNone &&
		preview.ApplyStatus != model.ActiveScheduleApplyStatusFailed &&
		preview.ApplyStatus != model.ActiveScheduleApplyStatusRejected {
		return newApplyError(ErrorCodeInvalidRequest, "当前 apply 状态不允许重新确认", nil)
	}
	return nil
}
|
||||
|
||||
// DetectIdempotencyConflict 判断同一个 preview_id + idempotency_key 是否被复用于不同请求体。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只比较当前请求和 preview 已记录的 apply_idempotency_key / apply_request_hash;
|
||||
// 2. 不查询数据库唯一约束,主线程仍需要在事务或行锁内调用;
|
||||
// 3. 返回 true 表示必须拒绝,避免同 key 不同内容污染审计链路。
|
||||
func DetectIdempotencyConflict(preview model.ActiveSchedulePreview, requestHash string, idempotencyKey string) bool {
|
||||
if strings.TrimSpace(idempotencyKey) == "" || strings.TrimSpace(preview.ApplyIdempotencyKey) == "" {
|
||||
return false
|
||||
}
|
||||
if preview.ApplyIdempotencyKey != idempotencyKey {
|
||||
return false
|
||||
}
|
||||
if preview.ApplyRequestHash == "" {
|
||||
return false
|
||||
}
|
||||
return preview.ApplyRequestHash != requestHash
|
||||
}
|
||||
491
backend/services/active_scheduler/core/applyadapter/adapter.go
Normal file
491
backend/services/active_scheduler/core/applyadapter/adapter.go
Normal file
@@ -0,0 +1,491 @@
|
||||
package applyadapter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// GormApplyAdapter writes confirmed active-schedule changes into the formal
// schedule tables.
//
// Responsibility boundaries:
//  1. writes only schedule_events / schedules, re-validating targets and
//     conflicts inside the transaction;
//  2. never writes back to active_schedule_previews, never publishes outbox
//     messages, never calls API/service/task layers;
//  3. never creates task_item rows or updates task / task_items status —
//     whether a task_pool entry is scheduled is derived by querying
//     schedule_events.
type GormApplyAdapter struct {
	db *gorm.DB
}

// NewGormApplyAdapter wires the adapter to a gorm DB handle.
func NewGormApplyAdapter(db *gorm.DB) *GormApplyAdapter {
	return &GormApplyAdapter{db: db}
}
|
||||
|
||||
// ApplyActiveScheduleChanges applies active-schedule changes inside a single
// DB transaction.
//
// Transaction semantics:
//  1. all changes are slot-normalized first and checked for internal
//     self-conflicts before touching the DB;
//  2. inside the transaction, target facts are locked and schedule occupancy
//     is re-read; any conflict returns slot_conflict;
//  3. commit happens only after every event and schedule row is inserted; any
//     error rolls back, so no partial write survives.
//
// Inputs/outputs:
//  1. req.UserID / req.PreviewID / req.Changes must be valid;
//  2. AppliedEventIDs are the newly created schedule_events.id values;
//  3. when the error is a *ApplyError, callers can branch on its Code.
func (a *GormApplyAdapter) ApplyActiveScheduleChanges(ctx context.Context, req ApplyActiveScheduleRequest) (ApplyActiveScheduleResult, error) {
	if a == nil || a.db == nil {
		return ApplyActiveScheduleResult{}, newApplyError(ErrorCodeInvalidRequest, "主动调度 apply adapter 未初始化", nil)
	}
	normalized, err := normalizeRequest(req)
	if err != nil {
		return ApplyActiveScheduleResult{}, err
	}

	result := ApplyActiveScheduleResult{ApplyID: req.ApplyID}
	err = a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		appliedEventIDs := make([]int, 0, len(normalized))
		appliedScheduleIDs := make([]int, 0)
		for _, change := range normalized {
			var eventIDs []int
			var scheduleIDs []int
			var applyErr error
			// Dispatch per change kind; unknown kinds abort the whole
			// transaction with unsupported_change_type.
			switch {
			case isAddTaskPoolChange(change):
				eventIDs, scheduleIDs, applyErr = a.applyTaskPoolChange(ctx, tx, req, change)
			case isCreateMakeupChange(change):
				eventIDs, scheduleIDs, applyErr = a.applyMakeupChange(ctx, tx, req, change)
			default:
				applyErr = newApplyError(ErrorCodeUnsupportedChangeType, fmt.Sprintf("不支持的主动调度变更类型:%s", change.ChangeType), nil)
			}
			if applyErr != nil {
				return applyErr
			}
			appliedEventIDs = append(appliedEventIDs, eventIDs...)
			appliedScheduleIDs = append(appliedScheduleIDs, scheduleIDs...)
		}
		result.AppliedEventIDs = appliedEventIDs
		result.AppliedScheduleIDs = appliedScheduleIDs
		return nil
	})
	if err != nil {
		// classifyDBError maps unclassified DB failures into ApplyError codes.
		return ApplyActiveScheduleResult{}, classifyDBError(err)
	}
	return result, nil
}
|
||||
|
||||
// applyTaskPoolChange inserts a task_pool task into the formal schedule as a
// new event plus its schedule rows. It returns the created event IDs and
// schedule IDs, or an ApplyError describing why the change was rejected.
func (a *GormApplyAdapter) applyTaskPoolChange(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange) ([]int, []int, error) {
	targetID := change.TargetID
	if change.TargetType != "" && change.TargetType != TargetTypeTaskPool {
		return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "add_task_pool_to_schedule 只能写入 task_pool 目标", nil)
	}

	// Lock the task_pool row so the "already scheduled?" decision is
	// serialized — concurrent confirms must not write duplicate task blocks.
	task, err := lockTaskPool(ctx, tx, req.UserID, targetID)
	if err != nil {
		return nil, nil, err
	}
	if task.IsCompleted {
		return nil, nil, newApplyError(ErrorCodeTargetCompleted, "task_pool 任务已完成,不能再加入日程", nil)
	}
	if err := ensureTaskPoolNotScheduled(ctx, tx, req.UserID, task.ID); err != nil {
		return nil, nil, err
	}
	if err := ensureSlotsFree(ctx, tx, req.UserID, change); err != nil {
		return nil, nil, err
	}

	// Fall back to a generated name when the task title is blank so the
	// event is still identifiable in the schedule.
	eventName := strings.TrimSpace(task.Title)
	if eventName == "" {
		eventName = fmt.Sprintf("任务 %d", task.ID)
	}
	relID := task.ID
	return insertTaskEventWithSchedules(ctx, tx, req, change, eventPayload{
		Name:           eventName,
		TaskSourceType: TaskSourceTypeTaskPool,
		RelID:          relID,
		Sections:       change.Sections,
	})
}
|
||||
|
||||
// applyMakeupChange inserts a makeup event for an existing source event. The
// makeup target (name, source type, rel id, original event) is resolved and
// validated first, then slot availability is re-checked before inserting.
func (a *GormApplyAdapter) applyMakeupChange(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange) ([]int, []int, error) {
	target, err := resolveMakeupTarget(ctx, tx, req.UserID, change)
	if err != nil {
		return nil, nil, err
	}
	if err := ensureSlotsFree(ctx, tx, req.UserID, change); err != nil {
		return nil, nil, err
	}

	return insertTaskEventWithSchedules(ctx, tx, req, change, eventPayload{
		Name:             target.Name,
		TaskSourceType:   target.TaskSourceType,
		RelID:            target.RelID,
		MakeupForEventID: &target.MakeupForEventID,
		Sections:         change.Sections,
	})
}
|
||||
|
||||
// normalizedChange is an ApplyChange whose target slot span has been resolved
// into a concrete week/day plus an explicit, validated list of sections.
type normalizedChange struct {
	ApplyChange
	Week      int   // resolved target week
	DayOfWeek int   // resolved target day of week (1–7)
	Sections  []int // expanded, contiguous section numbers for that day
}
|
||||
|
||||
// normalizeRequest validates the apply request and expands every change's
// target span into explicit sections. It also rejects requests whose changes
// overlap each other (same week/day/section claimed twice), so internal
// self-conflicts never reach the DB transaction.
func normalizeRequest(req ApplyActiveScheduleRequest) ([]normalizedChange, error) {
	if req.UserID <= 0 {
		return nil, newApplyError(ErrorCodeInvalidRequest, "user_id 不能为空", nil)
	}
	if strings.TrimSpace(req.PreviewID) == "" {
		return nil, newApplyError(ErrorCodeInvalidRequest, "preview_id 不能为空", nil)
	}
	if len(req.Changes) == 0 {
		return nil, newApplyError(ErrorCodeInvalidRequest, "changes 不能为空", nil)
	}

	// seenSlots detects duplicate week:day:section claims across the whole
	// request. normalizeSections runs first, so ToSlot is non-nil here.
	seenSlots := make(map[string]struct{})
	normalized := make([]normalizedChange, 0, len(req.Changes))
	for _, change := range req.Changes {
		sections, err := normalizeSections(change)
		if err != nil {
			return nil, err
		}
		for _, section := range sections {
			key := fmt.Sprintf("%d:%d:%d", change.ToSlot.Start.Week, change.ToSlot.Start.DayOfWeek, section)
			if _, exists := seenSlots[key]; exists {
				return nil, newApplyError(ErrorCodeSlotConflict, "本次确认请求内部存在重复节次", nil)
			}
			seenSlots[key] = struct{}{}
		}
		normalized = append(normalized, normalizedChange{
			ApplyChange: change,
			Week:        change.ToSlot.Start.Week,
			DayOfWeek:   change.ToSlot.Start.DayOfWeek,
			Sections:    sections,
		})
	}
	return normalized, nil
}
|
||||
|
||||
// normalizeSections validates a single change's slot span and expands it into
// an ascending list of atomic section numbers.
//
// Rules enforced, in order:
//  1. TargetID and ToSlot are mandatory; the start slot must satisfy
//     week > 0, day 1-7, section 1-12.
//  2. End may be derived from the duration when End.Section is absent, and a
//     fully zero End collapses to a single-section change at Start.
//  3. Start/End must be a contiguous span within one day, and an explicit
//     duration must match that span exactly.
func normalizeSections(change ApplyChange) ([]int, error) {
	if change.TargetID <= 0 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "变更目标 ID 不能为空", nil)
	}
	if change.ToSlot == nil {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "变更缺少目标节次", nil)
	}
	start := change.ToSlot.Start
	end := change.ToSlot.End
	if start.Week <= 0 || start.DayOfWeek < 1 || start.DayOfWeek > 7 || start.Section < 1 || start.Section > 12 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标起始节次不合法", nil)
	}
	// The change-level duration wins; fall back to the span's own duration.
	duration := change.DurationSections
	if duration <= 0 {
		duration = change.ToSlot.DurationSections
	}
	// Derive End from Start + duration when the caller omitted End.Section.
	if end.Section <= 0 && duration > 0 {
		end = Slot{Week: start.Week, DayOfWeek: start.DayOfWeek, Section: start.Section + duration - 1}
	}
	// A fully zero End means a single-section change at Start.
	if end.Week <= 0 && end.DayOfWeek <= 0 && end.Section <= 0 {
		end = start
	}
	if end.Week != start.Week || end.DayOfWeek != start.DayOfWeek || end.Section < start.Section {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标节次必须是同一天内的连续区间", nil)
	}
	if end.Section > 12 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标结束节次不合法", nil)
	}
	actualDuration := end.Section - start.Section + 1
	if duration > 0 && duration != actualDuration {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "duration_sections 与目标节次跨度不一致", nil)
	}
	sections := make([]int, 0, actualDuration)
	for section := start.Section; section <= end.Section; section++ {
		sections = append(sections, section)
	}
	return sections, nil
}
|
||||
|
||||
func isAddTaskPoolChange(change normalizedChange) bool {
|
||||
if change.ChangeType == ChangeTypeAddTaskPoolToSchedule {
|
||||
return true
|
||||
}
|
||||
return change.ChangeType == changeTypeAdd && change.TargetType == TargetTypeTaskPool
|
||||
}
|
||||
|
||||
// isCreateMakeupChange reports whether the change requests a new makeup block.
func isCreateMakeupChange(change normalizedChange) bool {
	return change.ChangeType == ChangeTypeCreateMakeup
}
|
||||
|
||||
// lockTaskPool loads the user's task_pool row under SELECT ... FOR UPDATE so
// the surrounding transaction can safely decide whether to schedule it.
// Returns target_not_found when the row is missing or owned by another user.
func lockTaskPool(ctx context.Context, tx *gorm.DB, userID, taskID int) (model.Task, error) {
	var task model.Task
	err := tx.WithContext(ctx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&task).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.Task{}, newApplyError(ErrorCodeTargetNotFound, "task_pool 任务不存在或不属于当前用户", nil)
		}
		return model.Task{}, newApplyError(ErrorCodeDBError, "读取 task_pool 任务失败", err)
	}
	return task, nil
}
|
||||
|
||||
// ensureTaskPoolNotScheduled rejects the change when the task_pool task has
// already been materialized as a task-type schedule event for this user.
func ensureTaskPoolNotScheduled(ctx context.Context, tx *gorm.DB, userID, taskID int) error {
	var count int64
	err := tx.WithContext(ctx).
		Model(&model.ScheduleEvent{}).
		Where("user_id = ? AND type = ? AND task_source_type = ? AND rel_id = ?", userID, scheduleEventTypeTask, TaskSourceTypeTaskPool, taskID).
		Count(&count).Error
	if err != nil {
		return newApplyError(ErrorCodeDBError, "检查 task_pool 是否已进入日程失败", err)
	}
	if count > 0 {
		return newApplyError(ErrorCodeTargetAlreadyScheduled, "task_pool 任务已进入日程", nil)
	}
	return nil
}
|
||||
|
||||
func ensureSlotsFree(ctx context.Context, tx *gorm.DB, userID int, change normalizedChange) error {
|
||||
sections := change.Sections
|
||||
if len(sections) == 0 {
|
||||
return newApplyError(ErrorCodeInvalidEditedChanges, "目标节次不能为空", nil)
|
||||
}
|
||||
sort.Ints(sections)
|
||||
startSection := sections[0]
|
||||
endSection := sections[len(sections)-1]
|
||||
|
||||
// 1. 在事务内对目标节次加行锁,命中任何已有 schedules 都视为冲突。
|
||||
// 2. 若并发事务在检查后抢先插入同一唯一键,后续 Create 会被唯一索引兜底拦截并整体回滚。
|
||||
// 3. MVP 不处理课程嵌入,任何已有课程、固定日程或任务都不可覆盖。
|
||||
var occupied []model.Schedule
|
||||
err := tx.WithContext(ctx).
|
||||
Model(&model.Schedule{}).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
Where("user_id = ? AND week = ? AND day_of_week = ? AND section IN ?", userID, change.Week, change.DayOfWeek, sections).
|
||||
Find(&occupied).Error
|
||||
if err != nil {
|
||||
return newApplyError(ErrorCodeDBError, "检查目标节次冲突失败", err)
|
||||
}
|
||||
if len(occupied) > 0 {
|
||||
return newApplyError(ErrorCodeSlotConflict, fmt.Sprintf("第 %d-%d 节已被占用", startSection, endSection), nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// eventPayload carries the event-level fields shared by the add-task-pool and
// create-makeup write paths.
type eventPayload struct {
	Name           string
	TaskSourceType string
	RelID          int
	// MakeupForEventID is non-nil only for makeup events.
	MakeupForEventID *int
	// Sections lists the atomic sections the new event occupies.
	Sections []int
}
|
||||
|
||||
// insertTaskEventWithSchedules writes one schedule_events row plus one
// schedules row per section inside the caller's transaction and returns the
// newly created event IDs and schedule IDs.
//
// NOTE(review): payload.Sections is assumed non-empty (callers run
// ensureSlotsFree first); an empty slice would panic at sections[0] — confirm.
func insertTaskEventWithSchedules(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange, payload eventPayload) ([]int, []int, error) {
	// Work on a sorted copy so the payload slice is not mutated.
	sections := append([]int(nil), payload.Sections...)
	sort.Ints(sections)
	start := sections[0]
	end := sections[len(sections)-1]
	// Convert the relative (week/day/section) span into absolute timestamps.
	startTime, endTime, err := conv.RelativeTimeToRealTime(change.Week, change.DayOfWeek, start, end)
	if err != nil {
		return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标节次无法转换为绝对时间", err)
	}

	previewID := strings.TrimSpace(req.PreviewID)
	event := model.ScheduleEvent{
		UserID:           req.UserID,
		Name:             payload.Name,
		Type:             scheduleEventTypeTask,
		TaskSourceType:   payload.TaskSourceType,
		RelID:            &payload.RelID,
		MakeupForEventID: payload.MakeupForEventID,
		ActivePreviewID:  &previewID,
		CanBeEmbedded:    false,
		StartTime:        startTime,
		EndTime:          endTime,
	}
	if err := tx.WithContext(ctx).Create(&event).Error; err != nil {
		return nil, nil, newApplyError(ErrorCodeDBError, "写入 schedule_events 失败", err)
	}

	// One atomic schedules row per occupied section, all pointing at the event.
	schedules := make([]model.Schedule, 0, len(sections))
	for _, section := range sections {
		schedules = append(schedules, model.Schedule{
			EventID:   event.ID,
			UserID:    req.UserID,
			Week:      change.Week,
			DayOfWeek: change.DayOfWeek,
			Section:   section,
			Status:    scheduleStatusNormal,
		})
	}
	if err := tx.WithContext(ctx).Create(&schedules).Error; err != nil {
		return nil, nil, newApplyError(ErrorCodeDBError, "写入 schedules 失败", err)
	}

	scheduleIDs := make([]int, 0, len(schedules))
	for _, schedule := range schedules {
		scheduleIDs = append(scheduleIDs, schedule.ID)
	}
	return []int{event.ID}, scheduleIDs, nil
}
|
||||
|
||||
// makeupTarget is the resolved fact set needed to create a makeup event.
type makeupTarget struct {
	Name           string
	TaskSourceType string
	RelID          int
	// MakeupForEventID points at the original schedule event being made up.
	MakeupForEventID int
}
|
||||
|
||||
// resolveMakeupTarget figures out which task the makeup block stands in for.
//
// Resolution order:
//  1. schedule_event targets (or an empty target type) inherit everything from
//     the locked source event;
//  2. task_pool / task_item targets require an explicit makeup_for_event_id
//     from Metadata, and the referenced event is locked to pin ownership;
//  3. any other target type is rejected.
func resolveMakeupTarget(ctx context.Context, tx *gorm.DB, userID int, change normalizedChange) (makeupTarget, error) {
	makeupForEventID := parsePositiveInt(change.Metadata["makeup_for_event_id"])
	if change.TargetType == "" || change.TargetType == TargetTypeScheduleEvent {
		// An explicit TargetID overrides the metadata hint.
		if change.TargetID > 0 {
			makeupForEventID = change.TargetID
		}
		return resolveMakeupFromEvent(ctx, tx, userID, makeupForEventID)
	}
	if makeupForEventID <= 0 {
		return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "create_makeup 必须提供 makeup_for_event_id", nil)
	}
	// Lock the source event even though only its ID is used, so a concurrent
	// change to it cannot race this transaction.
	if _, err := lockScheduleEvent(ctx, tx, userID, makeupForEventID); err != nil {
		return makeupTarget{}, err
	}

	switch change.TargetType {
	case TargetTypeTaskPool:
		task, err := lockTaskPool(ctx, tx, userID, change.TargetID)
		if err != nil {
			return makeupTarget{}, err
		}
		if task.IsCompleted {
			return makeupTarget{}, newApplyError(ErrorCodeTargetCompleted, "补做目标 task_pool 已完成", nil)
		}
		return makeupTarget{
			Name:             nonEmpty(task.Title, fmt.Sprintf("任务 %d", task.ID)),
			TaskSourceType:   TaskSourceTypeTaskPool,
			RelID:            task.ID,
			MakeupForEventID: makeupForEventID,
		}, nil
	case TargetTypeTaskItem:
		item, err := lockTaskItemForUser(ctx, tx, userID, change.TargetID)
		if err != nil {
			return makeupTarget{}, err
		}
		return makeupTarget{
			Name:             nonEmpty(stringPtrValue(item.Content), fmt.Sprintf("任务块 %d", item.ID)),
			TaskSourceType:   TaskSourceTypeTaskItem,
			RelID:            item.ID,
			MakeupForEventID: makeupForEventID,
		}, nil
	default:
		return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "create_makeup 目标类型不合法", nil)
	}
}
|
||||
|
||||
func resolveMakeupFromEvent(ctx context.Context, tx *gorm.DB, userID, eventID int) (makeupTarget, error) {
|
||||
event, err := lockScheduleEvent(ctx, tx, userID, eventID)
|
||||
if err != nil {
|
||||
return makeupTarget{}, err
|
||||
}
|
||||
if event.Type != scheduleEventTypeTask || event.RelID == nil || *event.RelID <= 0 {
|
||||
return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "补做来源必须是已排任务日程", nil)
|
||||
}
|
||||
sourceType := event.TaskSourceType
|
||||
if sourceType == "" {
|
||||
sourceType = TaskSourceTypeTaskItem
|
||||
}
|
||||
if sourceType != TaskSourceTypeTaskItem && sourceType != TaskSourceTypeTaskPool {
|
||||
return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "补做来源任务类型不合法", nil)
|
||||
}
|
||||
return makeupTarget{
|
||||
Name: nonEmpty(event.Name, fmt.Sprintf("补做任务 %d", event.ID)),
|
||||
TaskSourceType: sourceType,
|
||||
RelID: *event.RelID,
|
||||
MakeupForEventID: event.ID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// lockScheduleEvent loads the user's schedule event under FOR UPDATE so the
// makeup write path can rely on it staying put for the transaction.
func lockScheduleEvent(ctx context.Context, tx *gorm.DB, userID, eventID int) (model.ScheduleEvent, error) {
	if eventID <= 0 {
		return model.ScheduleEvent{}, newApplyError(ErrorCodeInvalidEditedChanges, "makeup_for_event_id 不能为空", nil)
	}
	var event model.ScheduleEvent
	err := tx.WithContext(ctx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("id = ? AND user_id = ?", eventID, userID).
		First(&event).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.ScheduleEvent{}, newApplyError(ErrorCodeTargetNotFound, "补做来源日程不存在或不属于当前用户", nil)
		}
		return model.ScheduleEvent{}, newApplyError(ErrorCodeDBError, "读取补做来源日程失败", err)
	}
	return event, nil
}
|
||||
|
||||
// lockTaskItemForUser loads a task_item under FOR UPDATE, joining through
// task_classes because ownership lives on the class, not the item itself.
func lockTaskItemForUser(ctx context.Context, tx *gorm.DB, userID, taskItemID int) (model.TaskClassItem, error) {
	var item model.TaskClassItem
	err := tx.WithContext(ctx).
		Table("task_items").
		Select("task_items.*").
		Joins("JOIN task_classes ON task_classes.id = task_items.category_id").
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("task_items.id = ? AND task_classes.user_id = ?", taskItemID, userID).
		First(&item).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.TaskClassItem{}, newApplyError(ErrorCodeTargetNotFound, "task_item 不存在或不属于当前用户", nil)
		}
		return model.TaskClassItem{}, newApplyError(ErrorCodeDBError, "读取 task_item 失败", err)
	}
	return item, nil
}
|
||||
|
||||
// parsePositiveInt parses a trimmed decimal string, returning 0 for anything
// that is not a strictly positive integer.
func parsePositiveInt(value string) int {
	trimmed := strings.TrimSpace(value)
	n, err := strconv.Atoi(trimmed)
	if err != nil {
		return 0
	}
	if n <= 0 {
		return 0
	}
	return n
}
|
||||
|
||||
// nonEmpty returns value with surrounding whitespace trimmed, or fallback
// when the trimmed value is empty. Trims once instead of twice as before.
func nonEmpty(value, fallback string) string {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return fallback
	}
	return trimmed
}
|
||||
|
||||
// stringPtrValue dereferences value, treating nil as the empty string.
func stringPtrValue(value *string) string {
	var result string
	if value != nil {
		result = *value
	}
	return result
}
|
||||
|
||||
func classifyDBError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
var applyErr *ApplyError
|
||||
if errors.As(err, &applyErr) {
|
||||
return applyErr
|
||||
}
|
||||
message := strings.ToLower(err.Error())
|
||||
if strings.Contains(message, "duplicate entry") ||
|
||||
strings.Contains(message, "unique constraint") ||
|
||||
strings.Contains(message, "unique violation") ||
|
||||
strings.Contains(message, "idx_user_slot_atomic") {
|
||||
return newApplyError(ErrorCodeSlotConflict, "目标节次已被其他日程占用", err)
|
||||
}
|
||||
return newApplyError(ErrorCodeDBError, "主动调度正式写库失败", err)
|
||||
}
|
||||
127
backend/services/active_scheduler/core/applyadapter/types.go
Normal file
127
backend/services/active_scheduler/core/applyadapter/types.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package applyadapter
|
||||
|
||||
import "time"
|
||||
|
||||
// Change, target and source vocabulary accepted by the apply adapter.
const (
	ChangeTypeAddTaskPoolToSchedule = "add_task_pool_to_schedule"
	ChangeTypeCreateMakeup          = "create_makeup"

	// changeTypeAdd is the short form matched together with TargetTypeTaskPool
	// in isAddTaskPoolChange.
	changeTypeAdd = "add"

	TargetTypeTaskPool      = "task_pool"
	TargetTypeTaskItem      = "task_item"
	TargetTypeScheduleEvent = "schedule_event"

	// Values written into schedule_events.type and schedules.status.
	scheduleEventTypeTask = "task"
	scheduleStatusNormal  = "normal"

	TaskSourceTypeTaskPool = "task_pool"
	TaskSourceTypeTaskItem = "task_item"
)

// Error codes carried by ApplyError.Code for upstream classification.
const (
	ErrorCodeInvalidRequest         = "invalid_request"
	ErrorCodeUnsupportedChangeType  = "unsupported_change_type"
	ErrorCodeTargetNotFound         = "target_not_found"
	ErrorCodeTargetCompleted        = "target_completed"
	ErrorCodeTargetAlreadyScheduled = "target_already_scheduled"
	ErrorCodeSlotConflict           = "slot_conflict"
	ErrorCodeInvalidEditedChanges   = "invalid_edited_changes"
	ErrorCodeDBError                = "db_error"
)
|
||||
|
||||
// ApplyActiveScheduleRequest is the formal write request handed to the
// schedule domain after an active-schedule confirmation.
//
// Responsibility boundaries:
//  1. Carries only the user, candidate and change facts already validated by
//     the upstream preview/confirm flow;
//  2. Does not express preview status write-back; on success the adapter only
//     returns the formally persisted IDs;
//  3. Changes may come from the original preview_changes or from the user's
//     edited_changes.
type ApplyActiveScheduleRequest struct {
	PreviewID   string
	ApplyID     string
	UserID      int
	CandidateID string
	Changes     []ApplyChange
	RequestedAt time.Time
	TraceID     string
}
|
||||
|
||||
// ApplyChange is the smallest unit of change the apply adapter can execute.
//
// Field semantics:
//  1. ChangeType supports add_task_pool_to_schedule / create_makeup;
//  2. TargetType + TargetID describe the task source or the original schedule
//     block to persist;
//  3. ToSlot is the final confirmed placement; the adapter does not trust the
//     caller's conflict checks and re-queries inside the transaction.
type ApplyChange struct {
	ChangeID         string
	ChangeType       string
	TargetType       string
	TargetID         int
	ToSlot           *SlotSpan
	DurationSections int
	Metadata         map[string]string
}
|
||||
|
||||
// Slot is one atomic section coordinate in the schedules table.
type Slot struct {
	Week      int
	DayOfWeek int
	Section   int
}
|
||||
|
||||
// SlotSpan describes one contiguous block of sections.
//
// Notes:
//  1. Start is required;
//  2. End may be derived from DurationSections, but when supplied by the
//     caller it must be contiguous with Start within the same week and day;
//  3. When DurationSections <= 0 the adapter computes it from Start/End.
type SlotSpan struct {
	Start            Slot
	End              Slot
	DurationSections int
}
|
||||
|
||||
// ApplyActiveScheduleResult is the outcome of the formal schedule write.
//
// Responsibility boundaries:
//  1. AppliedEventIDs returns the schedule_events.id rows created here;
//  2. AppliedScheduleIDs returns the schedules.id rows created here;
//  3. Preview apply_status is deliberately absent so the adapter never writes
//     back to active_schedule_previews.
type ApplyActiveScheduleResult struct {
	ApplyID            string
	AppliedEventIDs    []int
	AppliedScheduleIDs []int
}
|
||||
|
||||
// ApplyError is the classified business error the adapter returns upstream.
//
// Notes:
//  1. Code lets the caller pick the preview apply_error / user-facing copy;
//  2. Cause keeps the underlying error for log-based debugging;
//  3. Error() targets callers and stays human-readable.
type ApplyError struct {
	Code    string
	Message string
	Cause   error
}
|
||||
|
||||
func (e *ApplyError) Error() string {
|
||||
if e == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Cause == nil {
|
||||
return e.Message
|
||||
}
|
||||
return e.Message + ": " + e.Cause.Error()
|
||||
}
|
||||
|
||||
func (e *ApplyError) Unwrap() error {
|
||||
if e == nil {
|
||||
return nil
|
||||
}
|
||||
return e.Cause
|
||||
}
|
||||
|
||||
// newApplyError builds an *ApplyError; cause may be nil for pure business errors.
func newApplyError(code, message string, cause error) error {
	return &ApplyError{Code: code, Message: message, Cause: cause}
}
|
||||
340
backend/services/active_scheduler/core/candidate/candidate.go
Normal file
340
backend/services/active_scheduler/core/candidate/candidate.go
Normal file
@@ -0,0 +1,340 @@
|
||||
package candidate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
// Type enumerates the candidate kinds the deterministic generator can emit.
type Type string

const (
	TypeAddTaskPoolToSchedule       Type = "add_task_pool_to_schedule"
	TypeCreateMakeup                Type = "create_makeup"
	TypeAskUser                     Type = "ask_user"
	TypeNotifyOnly                  Type = "notify_only"
	TypeClose                       Type = "close"
	TypeCompressWithNextDynamicTask Type = "compress_with_next_dynamic_task" // Reserved constant: v1 must never generate this candidate.
)

// ChangeType enumerates the per-change actions inside a candidate.
type ChangeType string

const (
	ChangeTypeAdd          ChangeType = "add"
	ChangeTypeCreateMakeup ChangeType = "create_makeup"
	ChangeTypeAskUser      ChangeType = "ask_user"
	ChangeTypeNone         ChangeType = "none"
)
|
||||
|
||||
// Candidate is a deterministically generated active-schedule proposal.
//
// Responsibility boundaries:
//  1. Only describes structured changes (or no-change suggestions) that can
//     be written into a preview;
//  2. Contains no DAO models and never mutates the formal schedule directly;
//  3. v1 never generates compress_with_next_dynamic_task.
type Candidate struct {
	CandidateID   string
	CandidateType Type
	Title         string
	Summary       string
	Target        Target
	Changes       []ChangeItem
	BeforeSummary string
	AfterSummary  string
	Risk          string
	Score         int
	Validation    Validation
	Source        string
}
|
||||
|
||||
// Target identifies what the candidate acts on.
type Target struct {
	TargetType string
	TargetID   int
	Title      string
}
|
||||
|
||||
// ChangeItem is one structured change inside a candidate.
type ChangeItem struct {
	ChangeType       ChangeType
	TargetType       string
	TargetID         int
	FromSlot         *ports.Slot
	ToSlot           *ports.SlotSpan
	DurationSections int
	// AffectedEventIDs lists existing schedule events this change touches.
	AffectedEventIDs []int
	// EditedAllowed marks whether the user may edit this change before confirm.
	EditedAllowed bool
	Metadata      map[string]string
}
|
||||
|
||||
// Validation records whether a candidate passed generation-time checks.
type Validation struct {
	Valid  bool
	Reason string
}
|
||||
|
||||
// Generator enumerates, validates, ranks and truncates candidates.
//
// Responsibility boundaries:
//  1. Consumes only the context and observe results;
//  2. Never calls the LLM, writes previews, or sends notifications;
//  3. Candidates failing validation are dropped immediately, so later
//     selectors never have to judge legality.
type Generator struct{}

// NewGenerator returns a stateless candidate generator.
func NewGenerator() *Generator {
	return &Generator{}
}
|
||||
|
||||
// GenerateCandidates runs step three of the dry-run pipeline: candidate
// generation. Each observed issue maps to at most one candidate kind.
func (g *Generator) GenerateCandidates(ctx *schedulercontext.ActiveScheduleContext, observation observe.Result) []Candidate {
	var candidates []Candidate
	for _, issue := range observation.Issues {
		switch issue.Code {
		case observe.IssueTargetCompleted, observe.IssueTargetAlreadyScheduled:
			candidates = append(candidates, closeCandidate(ctx, issue))
		case observe.IssueFeedbackTargetUnknown:
			candidates = append(candidates, askUserCandidate(ctx, issue, "我还不能确定是哪一个日程块没有完成,需要用户确认目标。"))
		case observe.IssueCanAddTaskPoolToSchedule:
			if candidate, ok := g.addTaskPoolCandidate(ctx); ok {
				candidates = append(candidates, candidate)
			}
		case observe.IssueNeedMakeupBlock:
			if candidate, ok := g.createMakeupCandidate(ctx); ok {
				candidates = append(candidates, candidate)
			}
		case observe.IssueNoFreeSlot, observe.IssueCapacityInsufficient:
			candidates = append(candidates, notifyOnlyCandidate(ctx, issue, "当前 24 小时内没有足够空位,第一版不会生成压缩融合候选。"))
		case observe.IssueNoValidTimeWindow:
			candidates = append(candidates, askUserCandidate(ctx, issue, "缺少必要时间窗或目标事实,需要用户补充后再安排。"))
		}
	}
	// Drop invalid candidates, rank by score, then cap the list length.
	return trimCandidates(rankCandidates(validateCandidates(candidates)))
}
|
||||
|
||||
// addTaskPoolCandidate proposes placing the target task_pool task into the
// first contiguous free span of the rolling 24h window. Returns false when
// no span large enough exists.
func (g *Generator) addTaskPoolCandidate(ctx *schedulercontext.ActiveScheduleContext) (Candidate, bool) {
	needed := ctx.Target.EstimatedSections
	// Legacy tasks may lack an estimate; fall back to a single section.
	if needed <= 0 {
		needed = 1
	}
	span, ok := firstContiguousFreeSpan(ctx.ScheduleFacts.FreeSlots, needed)
	if !ok {
		return Candidate{}, false
	}
	// Deterministic ID so repeated dry-runs yield the same candidate.
	id := fmt.Sprintf("%s:%d:%d:%d:%d", TypeAddTaskPoolToSchedule, ctx.Target.TaskID, span.Start.Week, span.Start.DayOfWeek, span.Start.Section)
	return Candidate{
		CandidateID:   id,
		CandidateType: TypeAddTaskPoolToSchedule,
		Title:         "加入日程",
		Summary:       "把重要且紧急任务放入滚动 24 小时内的空闲节次。",
		Target:        targetFromContext(ctx),
		Changes: []ChangeItem{{
			ChangeType:       ChangeTypeAdd,
			TargetType:       string(trigger.TargetTypeTaskPool),
			TargetID:         ctx.Target.TaskID,
			ToSlot:           &span,
			DurationSections: needed,
			EditedAllowed:    true,
			Metadata: map[string]string{
				"task_source_type": string(trigger.TargetTypeTaskPool),
			},
		}},
		BeforeSummary: "任务尚未进入正式日程。",
		AfterSummary:  "任务将占用第一个可用连续节次块。",
		Risk:          "仅新增 task_pool 日程块,不移动已有日程。",
		// Earlier sections score higher so the earliest placement ranks first.
		Score:      100 - span.Start.Section,
		Validation: Validation{Valid: true},
		Source:     "backend_deterministic",
	}, true
}
|
||||
|
||||
// createMakeupCandidate proposes one extra makeup section for an unfinished
// schedule block without moving the original. Requires a known task_item
// target and at least one free slot.
func (g *Generator) createMakeupCandidate(ctx *schedulercontext.ActiveScheduleContext) (Candidate, bool) {
	if ctx == nil || ctx.FeedbackFacts.TargetTaskItemID <= 0 {
		return Candidate{}, false
	}
	span, ok := firstContiguousFreeSpan(ctx.ScheduleFacts.FreeSlots, 1)
	if !ok {
		return Candidate{}, false
	}
	targetID := ctx.FeedbackFacts.TargetEventID
	// Fall back to the trigger's target when feedback did not pin an event.
	if targetID <= 0 {
		targetID = ctx.Trigger.TargetID
	}
	id := fmt.Sprintf("%s:%d:%d:%d:%d", TypeCreateMakeup, targetID, span.Start.Week, span.Start.DayOfWeek, span.Start.Section)
	return Candidate{
		CandidateID:   id,
		CandidateType: TypeCreateMakeup,
		Title:         "新增补做块",
		Summary:       "为未完成的日程块新增一个补做时间,不移动原任务。",
		Target:        targetFromContext(ctx),
		Changes: []ChangeItem{{
			ChangeType:       ChangeTypeCreateMakeup,
			TargetType:       string(trigger.TargetTypeScheduleEvent),
			TargetID:         targetID,
			ToSlot:           &span,
			DurationSections: 1,
			AffectedEventIDs: []int{targetID},
			EditedAllowed:    true,
			Metadata: map[string]string{
				"makeup_for_event_id": fmt.Sprintf("%d", targetID),
			},
		}},
		BeforeSummary: "用户反馈该日程块未完成。",
		AfterSummary:  "新增 1 节补做块,原日程不移动。",
		Risk:          "第一版不做局部重排;若补做块仍不合适,需要用户手动调整。",
		Score:         90 - span.Start.Section,
		Validation:    Validation{Valid: true},
		Source:        "backend_deterministic",
	}, true
}
|
||||
|
||||
// closeCandidate ends the active-schedule run with no change when the
// observed facts already cover the trigger reason.
func closeCandidate(ctx *schedulercontext.ActiveScheduleContext, issue observe.Issue) Candidate {
	return Candidate{
		CandidateID:   fmt.Sprintf("%s:%s:%d", TypeClose, ctx.Trigger.TargetType, ctx.Trigger.TargetID),
		CandidateType: TypeClose,
		Title:         "关闭主动调度",
		Summary:       issue.Reason,
		Target:        targetFromContext(ctx),
		Changes: []ChangeItem{{
			ChangeType: ChangeTypeNone,
			TargetType: string(ctx.Trigger.TargetType),
			TargetID:   ctx.Trigger.TargetID,
		}},
		BeforeSummary: "当前事实已覆盖触发原因。",
		AfterSummary:  "无需生成预览或通知。",
		Risk:          "无正式日程变更。",
		Score:         0,
		Validation:    Validation{Valid: true},
		Source:        "backend_deterministic",
	}
}
|
||||
|
||||
// askUserCandidate asks the user for the facts required to proceed safely;
// the caller supplies the explanatory summary text.
func askUserCandidate(ctx *schedulercontext.ActiveScheduleContext, issue observe.Issue, summary string) Candidate {
	return Candidate{
		CandidateID:   fmt.Sprintf("%s:%s:%d", TypeAskUser, issue.Code, ctx.Trigger.TargetID),
		CandidateType: TypeAskUser,
		Title:         "需要用户确认",
		Summary:       summary,
		Target:        targetFromContext(ctx),
		Changes: []ChangeItem{{
			ChangeType: ChangeTypeAskUser,
			TargetType: string(ctx.Trigger.TargetType),
			TargetID:   ctx.Trigger.TargetID,
		}},
		BeforeSummary: "缺少安全生成调整方案所需的事实。",
		AfterSummary:  "等待用户补充信息后再重新 dry-run。",
		Risk:          "不会修改正式日程。",
		Score:         0,
		Validation:    Validation{Valid: true},
		Source:        "backend_deterministic",
	}
}
|
||||
|
||||
// notifyOnlyCandidate produces a notification-only suggestion when no safe
// placement exists; the caller supplies the summary text.
func notifyOnlyCandidate(ctx *schedulercontext.ActiveScheduleContext, issue observe.Issue, summary string) Candidate {
	return Candidate{
		CandidateID:   fmt.Sprintf("%s:%s:%d", TypeNotifyOnly, issue.Code, ctx.Trigger.TargetID),
		CandidateType: TypeNotifyOnly,
		Title:         "仅提醒",
		Summary:       summary,
		Target:        targetFromContext(ctx),
		Changes: []ChangeItem{{
			ChangeType: ChangeTypeNone,
			TargetType: string(ctx.Trigger.TargetType),
			TargetID:   ctx.Trigger.TargetID,
		}},
		BeforeSummary: "当前窗口没有可安全安排的连续空位。",
		AfterSummary:  "不生成压缩融合或正式变更。",
		Risk:          "任务可能继续保持未安排状态。",
		Score:         0,
		Validation:    Validation{Valid: true},
		Source:        "backend_deterministic",
	}
}
|
||||
|
||||
// targetFromContext projects the trigger and target facts into the candidate
// target descriptor.
func targetFromContext(ctx *schedulercontext.ActiveScheduleContext) Target {
	return Target{
		TargetType: string(ctx.Trigger.TargetType),
		TargetID:   ctx.Trigger.TargetID,
		Title:      ctx.Target.Title,
	}
}
|
||||
|
||||
func firstContiguousFreeSpan(slots []ports.Slot, needed int) (ports.SlotSpan, bool) {
|
||||
if needed <= 0 {
|
||||
return ports.SlotSpan{}, false
|
||||
}
|
||||
sorted := append([]ports.Slot(nil), slots...)
|
||||
sort.Slice(sorted, func(i, j int) bool {
|
||||
return slotLess(sorted[i], sorted[j])
|
||||
})
|
||||
for i := range sorted {
|
||||
end := i + needed - 1
|
||||
if end >= len(sorted) {
|
||||
break
|
||||
}
|
||||
if isContiguous(sorted[i : end+1]) {
|
||||
return ports.SlotSpan{Start: sorted[i], End: sorted[end], DurationSections: needed}, true
|
||||
}
|
||||
}
|
||||
return ports.SlotSpan{}, false
|
||||
}
|
||||
|
||||
func isContiguous(slots []ports.Slot) bool {
|
||||
if len(slots) == 0 {
|
||||
return false
|
||||
}
|
||||
for i := 1; i < len(slots); i++ {
|
||||
prev := slots[i-1]
|
||||
curr := slots[i]
|
||||
if prev.Week != curr.Week || prev.DayOfWeek != curr.DayOfWeek || prev.Section+1 != curr.Section {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func slotLess(left, right ports.Slot) bool {
|
||||
if !left.StartAt.IsZero() && !right.StartAt.IsZero() && !left.StartAt.Equal(right.StartAt) {
|
||||
return left.StartAt.Before(right.StartAt)
|
||||
}
|
||||
if left.Week != right.Week {
|
||||
return left.Week < right.Week
|
||||
}
|
||||
if left.DayOfWeek != right.DayOfWeek {
|
||||
return left.DayOfWeek < right.DayOfWeek
|
||||
}
|
||||
return left.Section < right.Section
|
||||
}
|
||||
|
||||
func validateCandidates(candidates []Candidate) []Candidate {
|
||||
valid := make([]Candidate, 0, len(candidates))
|
||||
for _, candidate := range candidates {
|
||||
if candidate.CandidateType == TypeCompressWithNextDynamicTask {
|
||||
// 1. 压缩融合只作为 schema 预留;
|
||||
// 2. 第一版 dry-run 禁止生成,防止后续 preview/apply 误认为可以执行。
|
||||
continue
|
||||
}
|
||||
if candidate.CandidateID == "" || candidate.CandidateType == "" {
|
||||
continue
|
||||
}
|
||||
if candidate.CandidateType == TypeAddTaskPoolToSchedule && len(candidate.Changes) == 0 {
|
||||
continue
|
||||
}
|
||||
valid = append(valid, candidate)
|
||||
}
|
||||
return valid
|
||||
}
|
||||
|
||||
func rankCandidates(candidates []Candidate) []Candidate {
|
||||
sort.SliceStable(candidates, func(i, j int) bool {
|
||||
return candidates[i].Score > candidates[j].Score
|
||||
})
|
||||
return candidates
|
||||
}
|
||||
|
||||
func trimCandidates(candidates []Candidate) []Candidate {
|
||||
if len(candidates) <= 3 {
|
||||
return candidates
|
||||
}
|
||||
return candidates[:3]
|
||||
}
|
||||
220
backend/services/active_scheduler/core/context/builder.go
Normal file
220
backend/services/active_scheduler/core/context/builder.go
Normal file
@@ -0,0 +1,220 @@
|
||||
package schedulercontext
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
// Builder turns a unified trigger into a read-only active-schedule fact
// snapshot.
//
// Responsibility boundaries:
//  1. Reads external facts only through ports;
//  2. Generates no candidates, calls no LLM, writes no preview;
//  3. Missing business facts go into MissingInfo where possible, letting the
//     observe stage decide on ask_user.
type Builder struct {
	readers ports.Readers
	// clock supplies real_now; replaceable in tests via SetClock.
	clock func() time.Time
}
|
||||
|
||||
func NewBuilder(readers ports.Readers) (*Builder, error) {
|
||||
if readers.ScheduleReader == nil {
|
||||
return nil, errors.New("ScheduleReader 不能为空")
|
||||
}
|
||||
if readers.TaskReader == nil {
|
||||
return nil, errors.New("TaskReader 不能为空")
|
||||
}
|
||||
if readers.FeedbackReader == nil {
|
||||
return nil, errors.New("FeedbackReader 不能为空")
|
||||
}
|
||||
return &Builder{
|
||||
readers: readers,
|
||||
clock: time.Now,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// SetClock 允许测试注入稳定时钟。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 仅影响 real_now;
|
||||
// 2. 不覆盖 trigger.MockNow 的业务语义;
|
||||
// 3. nil 会被忽略,避免测试误把时钟置空。
|
||||
func (b *Builder) SetClock(clock func() time.Time) {
|
||||
if clock != nil {
|
||||
b.clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
// BuildContext runs step one of the dry-run pipeline: building the
// active-schedule context for a validated trigger.
func (b *Builder) BuildContext(ctx context.Context, trig trigger.ActiveScheduleTrigger) (*ActiveScheduleContext, error) {
	if err := trig.Validate(); err != nil {
		return nil, err
	}

	realNow := b.clock()
	// effective_now may differ from real_now (trigger-supplied time); all
	// window math below uses the effective time.
	effectiveNow := trig.EffectiveNow(realNow)
	windowStart := effectiveNow
	windowEnd := effectiveNow.Add(24 * time.Hour)
	result := &ActiveScheduleContext{
		Trigger: trig,
		User: UserFacts{
			UserID:   trig.UserID,
			Timezone: effectiveNow.Location().String(),
		},
		Now: NowFacts{
			RealNow:      realNow,
			EffectiveNow: effectiveNow,
		},
		Window: WindowFacts{
			StartAt:      windowStart,
			EndAt:        windowEnd,
			WindowReason: WindowReasonRolling24H,
		},
		Target: TargetFacts{SourceType: trig.TargetType},
		Trace: TraceFacts{
			TraceID: trig.TraceID,
			BuildSteps: []string{
				"1. 校验 trigger 并确定 real_now / effective_now。",
				"2. 构造滚动 24 小时时间窗,后续读取均基于同一窗口。",
			},
		},
	}

	// Trigger-specific facts first, so schedule facts can build on them.
	switch trig.TriggerType {
	case trigger.TriggerTypeImportantUrgentTask:
		if err := b.fillTaskPoolFacts(ctx, result); err != nil {
			return nil, err
		}
	case trigger.TriggerTypeUnfinishedFeedback:
		if err := b.fillFeedbackFacts(ctx, result); err != nil {
			return nil, err
		}
	}

	if err := b.fillScheduleFacts(ctx, result); err != nil {
		return nil, err
	}
	b.fillDerivedFacts(result)
	return result, nil
}
|
||||
|
||||
// fillTaskPoolFacts reads the target task_pool task and projects it into the
// context's target facts. A missing task is recorded in MissingInfo instead
// of failing, so observe can downgrade to ask_user.
func (b *Builder) fillTaskPoolFacts(ctx context.Context, result *ActiveScheduleContext) error {
	task, found, err := b.readers.TaskReader.GetTaskForActiveSchedule(ctx, ports.TaskRequest{
		UserID: result.Trigger.UserID,
		TaskID: result.Trigger.TargetID,
		Now:    result.Now.EffectiveNow,
	})
	if err != nil {
		return err
	}
	if !found {
		result.DerivedFacts.MissingInfo = append(result.DerivedFacts.MissingInfo, "target_task")
		result.Trace.Warnings = append(result.Trace.Warnings, "未读取到目标 task_pool 任务,后续应转为 ask_user。")
		return nil
	}

	estimatedSections := task.EstimatedSections
	if estimatedSections <= 0 {
		// 1. Legacy rows may lack estimated_sections; the MVP falls back to
		//    one section so an empty value does not block the dry-run.
		// 2. The production adapter should supply the real field to reduce
		//    the preview drift this fallback introduces.
		estimatedSections = 1
	}
	result.TaskPoolFacts.TargetTask = &task
	result.Target.TaskID = task.ID
	result.Target.Title = task.Title
	result.Target.EstimatedSections = estimatedSections
	result.Target.DeadlineAt = task.DeadlineAt
	result.Target.UrgencyThresholdAt = task.UrgencyThresholdAt
	result.Target.Priority = task.Priority
	if task.IsCompleted {
		result.Target.Status = "completed"
	} else {
		result.Target.Status = "pending"
	}
	result.Trace.BuildSteps = append(result.Trace.BuildSteps, "3. 通过 TaskReader 读取 task_pool 目标事实。")
	return nil
}
|
||||
|
||||
// fillFeedbackFacts reads the unfinished-feedback signal via FeedbackReader
// and mirrors it into FeedbackFacts and the flattened TargetFacts view.
//
// A missing signal or an unknown target is recorded in MissingInfo (plus a
// trace warning for the missing-signal case) instead of being returned as an
// error, so the flow can degrade to ask_user.
func (b *Builder) fillFeedbackFacts(ctx context.Context, result *ActiveScheduleContext) error {
	feedback, found, err := b.readers.FeedbackReader.GetFeedbackSignal(ctx, ports.FeedbackRequest{
		UserID:         result.Trigger.UserID,
		FeedbackID:     result.Trigger.FeedbackID,
		IdempotencyKey: result.Trigger.IdempotencyKey,
		TargetType:     string(result.Trigger.TargetType),
		TargetID:       result.Trigger.TargetID,
	})
	if err != nil {
		return err
	}
	if !found {
		result.DerivedFacts.MissingInfo = append(result.DerivedFacts.MissingInfo, "feedback_signal")
		result.Trace.Warnings = append(result.Trace.Warnings, "未读取到反馈信号,后续应转为 ask_user。")
		return nil
	}

	result.FeedbackFacts = FeedbackFacts{
		FeedbackID:       feedback.FeedbackID,
		FeedbackText:     feedback.Text,
		TargetKnown:      feedback.TargetKnown,
		TargetEventID:    feedback.TargetEventID,
		TargetTaskItemID: feedback.TargetTaskItemID,
		FeedbackTarget:   feedback.TargetTitle,
	}
	result.Target.ScheduleEventID = feedback.TargetEventID
	result.Target.TaskItemID = feedback.TargetTaskItemID
	result.Target.Title = feedback.TargetTitle
	// Feedback targets carry no sizing information; one section is the fixed
	// fallback estimate here.
	result.Target.EstimatedSections = 1
	if !feedback.TargetKnown {
		result.DerivedFacts.MissingInfo = append(result.DerivedFacts.MissingInfo, "feedback_target")
	}
	result.Trace.BuildSteps = append(result.Trace.BuildSteps, "3. 通过 FeedbackReader 读取 unfinished_feedback 信号。")
	return nil
}
|
||||
|
||||
// fillScheduleFacts loads the rolling-window schedule facts via
// ScheduleReader and mirrors them into ScheduleFacts, the window's relative
// slots (occupied first, then free), and the already-scheduled flag.
func (b *Builder) fillScheduleFacts(ctx context.Context, result *ActiveScheduleContext) error {
	facts, err := b.readers.ScheduleReader.GetScheduleFactsByWindow(ctx, ports.ScheduleWindowRequest{
		UserID:      result.Trigger.UserID,
		TargetType:  string(result.Trigger.TargetType),
		TargetID:    result.Trigger.TargetID,
		WindowStart: result.Window.StartAt,
		WindowEnd:   result.Window.EndAt,
		Now:         result.Now.EffectiveNow,
	})
	if err != nil {
		return err
	}
	result.ScheduleFacts = ScheduleFacts{
		Events:          facts.Events,
		OccupiedSlots:   facts.OccupiedSlots,
		FreeSlots:       facts.FreeSlots,
		NextDynamicTask: facts.NextDynamicTask,
	}
	result.Window.RelativeSlots = append(result.Window.RelativeSlots, facts.OccupiedSlots...)
	result.Window.RelativeSlots = append(result.Window.RelativeSlots, facts.FreeSlots...)
	result.DerivedFacts.TargetAlreadyScheduled = facts.TargetAlreadyScheduled
	result.Trace.BuildSteps = append(result.Trace.BuildSteps, "4. 通过 ScheduleReader 读取滚动 24 小时日程事实。")
	return nil
}
|
||||
|
||||
func (b *Builder) fillDerivedFacts(result *ActiveScheduleContext) {
|
||||
result.DerivedFacts.AvailableCapacity = len(result.ScheduleFacts.FreeSlots)
|
||||
if result.TaskPoolFacts.TargetTask != nil {
|
||||
result.DerivedFacts.TargetCompleted = result.TaskPoolFacts.TargetTask.IsCompleted
|
||||
}
|
||||
if result.Trigger.TriggerType == trigger.TriggerTypeUnfinishedFeedback && !result.FeedbackFacts.TargetKnown {
|
||||
result.DerivedFacts.MissingInfo = appendMissing(result.DerivedFacts.MissingInfo, "feedback_target")
|
||||
}
|
||||
result.Trace.BuildSteps = append(result.Trace.BuildSteps, "5. 汇总完成状态、已排状态、可用容量与缺失事实。")
|
||||
}
|
||||
|
||||
// appendMissing adds next to values unless it is already present, keeping the
// missing-info list free of duplicates.
func appendMissing(values []string, next string) []string {
	for _, existing := range values {
		if existing == next {
			return values
		}
	}
	return append(values, next)
}
|
||||
94
backend/services/active_scheduler/core/context/context.go
Normal file
94
backend/services/active_scheduler/core/context/context.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package schedulercontext
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
const (
	// WindowReasonRolling24H marks windows built as "now .. now+24h".
	WindowReasonRolling24H = "rolling_24h"
)

// ActiveScheduleContext is the read-only fact snapshot for an active-schedule
// dry-run.
//
// Responsibility boundaries:
//  1. Carries the facts aggregated during the BuildContext phase;
//  2. Holds no DAO, service or provider instances;
//  3. Does not generate candidates and does not write previews,
//     notifications or committed schedule entries.
type ActiveScheduleContext struct {
	Trigger       trigger.ActiveScheduleTrigger
	User          UserFacts
	Now           NowFacts
	Window        WindowFacts
	Target        TargetFacts
	TaskPoolFacts TaskPoolFacts
	ScheduleFacts ScheduleFacts
	FeedbackFacts FeedbackFacts
	DerivedFacts  DerivedFacts
	Trace         TraceFacts
}
|
||||
|
||||
// UserFacts identifies the user the dry-run is for.
type UserFacts struct {
	UserID   int
	Timezone string // taken from EffectiveNow's location by the builder
}

// NowFacts keeps both the wall-clock time and the trigger-adjusted "now".
type NowFacts struct {
	RealNow      time.Time
	EffectiveNow time.Time
}

// WindowFacts describes the rolling read window all facts were loaded with.
type WindowFacts struct {
	StartAt       time.Time
	EndAt         time.Time
	RelativeSlots []ports.Slot // occupied plus free slots inside the window
	WindowReason  string       // e.g. WindowReasonRolling24H
}

// TargetFacts is the flattened view of whatever the trigger points at
// (a task_pool task or a schedule_event, depending on SourceType).
type TargetFacts struct {
	SourceType         trigger.TargetType
	TaskID             int
	ScheduleEventID    int
	TaskItemID         int
	Title              string
	EstimatedSections  int
	DeadlineAt         *time.Time
	UrgencyThresholdAt *time.Time
	Priority           int
	Status             string // "completed" or "pending" for task targets
}

// TaskPoolFacts holds the task read for task-pool triggers; TargetTask is nil
// when the task could not be found.
type TaskPoolFacts struct {
	TargetTask *ports.TaskFact
}

// ScheduleFacts mirrors the window read returned by ScheduleReader.
type ScheduleFacts struct {
	Events          []ports.ScheduleEventFact
	OccupiedSlots   []ports.Slot
	FreeSlots       []ports.Slot
	NextDynamicTask *ports.ScheduleEventFact
}

// FeedbackFacts mirrors the unfinished-feedback signal, when present.
type FeedbackFacts struct {
	FeedbackID       string
	FeedbackText     string
	FeedbackTarget   string
	TargetKnown      bool
	TargetEventID    int
	TargetTaskItemID int
}

// DerivedFacts aggregates conclusions computed from the raw facts.
type DerivedFacts struct {
	TargetAlreadyScheduled bool
	TargetCompleted        bool
	AvailableCapacity      int // number of free slots in the window
	MissingInfo            []string
}

// TraceFacts records how the context was built, for diagnostics.
type TraceFacts struct {
	TraceID    string
	BuildSteps []string
	Warnings   []string
}
|
||||
84
backend/services/active_scheduler/core/feedbacklocate/dto.go
Normal file
84
backend/services/active_scheduler/core/feedbacklocate/dto.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package feedbacklocate
|
||||
|
||||
import "strings"
|
||||
|
||||
const (
	// ActionSelectCandidate means the model has pinned the supplement to a
	// specific schedule_event.
	ActionSelectCandidate = "select_candidate"
	// ActionAskUser means the model could not locate a stable target and the
	// user must be asked again.
	ActionAskUser = "ask_user"

	// TargetTypeScheduleEvent is the only target type this stage may return.
	TargetTypeScheduleEvent = "schedule_event"
)

// Request is the minimal input for the feedback-locating node. It only
// carries the context needed to place the current supplement; it has no
// candidate-filtering or preview-persisting responsibilities.
type Request struct {
	UserID          int
	UserMessage     string
	PendingQuestion string
	MissingInfo     []string
}

// Result is the minimal output of the feedback-locating node: either a
// located schedule_event or a request to keep asking the user. It never
// carries committed-schedule writes or previews.
type Result struct {
	Action          string
	TargetType      string
	TargetID        int
	Reason          string
	AskUserQuestion string
}

// IsResolved reports whether this round produced a verifiable
// schedule_event: only action=select_candidate with
// target_type=schedule_event and target_id>0 counts as success.
func (r Result) IsResolved() bool {
	if !strings.EqualFold(strings.TrimSpace(r.Action), ActionSelectCandidate) {
		return false
	}
	if !strings.EqualFold(strings.TrimSpace(r.TargetType), TargetTypeScheduleEvent) {
		return false
	}
	return r.TargetID > 0
}

// ShouldAskUser reports whether this round must fall back to asking the user.
func (r Result) ShouldAskUser() bool {
	return !r.IsResolved()
}
|
||||
|
||||
// promptInput is the JSON document serialized into the locator model's user
// message by buildUserPrompt.
type promptInput struct {
	GeneratedAt     string            `json:"generated_at"`
	UserMessage     string            `json:"user_message"`
	PendingQuestion string            `json:"pending_question,omitempty"`
	MissingInfo     []string          `json:"missing_info,omitempty"`
	Window          promptWindowInput `json:"window"`
	Candidates      []eventCandidate  `json:"candidates"`
}

// promptWindowInput describes the rolling-window boundaries shown to the model.
type promptWindowInput struct {
	StartAt string `json:"start_at"`
	EndAt   string `json:"end_at"`
}

// eventCandidate is the compact schedule_event view the model may pick from;
// a returned target_id must come from this list.
type eventCandidate struct {
	TargetID    int    `json:"target_id"`
	Title       string `json:"title"`
	SourceType  string `json:"source_type,omitempty"`
	RelatedID   int    `json:"related_id,omitempty"`
	SlotSummary string `json:"slot_summary,omitempty"`
}

// llmResponse mirrors the JSON object the model is required to return.
type llmResponse struct {
	Action          string `json:"action"`
	TargetType      string `json:"target_type"`
	TargetID        int    `json:"target_id"`
	Reason          string `json:"reason"`
	AskUserQuestion string `json:"ask_user_question"`
}
|
||||
@@ -0,0 +1,69 @@
|
||||
package feedbacklocate
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// locateSystemPrompt is the fixed system prompt (in Chinese, delivered to the
// model verbatim) for the unfinished_feedback locator. It constrains the
// model to JSON-only output, the five allowed fields, schedule_event-only
// targets, candidate-list target IDs, and the select_candidate / ask_user
// decision rule.
const locateSystemPrompt = `
你是 SmartFlow 主动调度里专门负责 unfinished_feedback 的定位器。
你的任务只有一个:根据用户补充的话,把它定位到当前滚动窗口中的某一个 schedule_event;定位不了就继续 ask_user。

硬规则:
1. 只允许输出 JSON,不要输出 markdown,不要输出解释性正文。
2. 只允许返回 action / target_type / target_id / reason / ask_user_question 这几个字段。
3. target_type 只能是 schedule_event。
4. target_id 必须来自候选列表里的 target_id,不要编造,不要猜一个新的。
5. 当你不能稳定定位时,action 必须是 ask_user,并给出一句短问题。
6. 当用户补充信息已经足够时,action 必须是 select_candidate。
7. 请优先结合当前时间、用户原始补充话术、pending question 和候选日程的时间顺序来判断。
`
|
||||
|
||||
func buildPromptInput(req Request, generatedAt string, windowStart string, windowEnd string, candidates []eventCandidate) promptInput {
|
||||
input := promptInput{
|
||||
GeneratedAt: generatedAt,
|
||||
UserMessage: strings.TrimSpace(req.UserMessage),
|
||||
Window: promptWindowInput{
|
||||
StartAt: windowStart,
|
||||
EndAt: windowEnd,
|
||||
},
|
||||
}
|
||||
|
||||
if trimmed := strings.TrimSpace(req.PendingQuestion); trimmed != "" {
|
||||
input.PendingQuestion = trimmed
|
||||
}
|
||||
if len(req.MissingInfo) > 0 {
|
||||
input.MissingInfo = cloneAndTrimStrings(req.MissingInfo)
|
||||
}
|
||||
input.Candidates = append([]eventCandidate(nil), candidates...)
|
||||
return input
|
||||
}
|
||||
|
||||
func buildUserPrompt(input promptInput) (string, error) {
|
||||
raw, err := json.MarshalIndent(input, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var builder strings.Builder
|
||||
builder.WriteString("请根据输入定位当前滚动窗口中的 schedule_event。")
|
||||
builder.WriteString("只输出 JSON,不要补充任何其它内容。\n")
|
||||
builder.WriteString("输入:\n")
|
||||
builder.WriteString(string(raw))
|
||||
return builder.String(), nil
|
||||
}
|
||||
|
||||
// BuildAskUserQuestion 负责把 missing_info 转成继续追问用户的短问题。
|
||||
func BuildAskUserQuestion(missingInfo []string) string {
|
||||
normalized := cloneAndTrimStrings(missingInfo)
|
||||
if len(normalized) == 0 {
|
||||
return "请补充能唯一定位到未完成日程块的信息。"
|
||||
}
|
||||
|
||||
for _, item := range normalized {
|
||||
if item == "feedback_target" {
|
||||
return "请告诉我你指的是哪一个未完成的日程块,比如具体时间或名称。"
|
||||
}
|
||||
}
|
||||
return "请补充 " + strings.Join(normalized, "、") + " 对应的信息。"
|
||||
}
|
||||
367
backend/services/active_scheduler/core/feedbacklocate/service.go
Normal file
367
backend/services/active_scheduler/core/feedbacklocate/service.go
Normal file
@@ -0,0 +1,367 @@
|
||||
package feedbacklocate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
// locateMaxTokens caps the locator model's completion length.
const locateMaxTokens = 800

// Service maps an unfinished_feedback supplement onto a schedule_event
// inside the current rolling window.
//
// Responsibility boundaries:
//  1. Only decides "located" vs "keep asking"; it never writes committed
//     schedules.
//  2. Candidates come from ScheduleReader and the JSON verdict from the LLM;
//     if either is unavailable it falls back to ask_user.
//  3. It creates no new tool system and produces no previews directly.
type Service struct {
	reader ports.ScheduleReader
	client *llmservice.Client
	clock  func() time.Time
	logger *log.Logger
}
|
||||
|
||||
// NewService 创建反馈定位服务。
|
||||
//
|
||||
// 说明:
|
||||
// 1. reader / client 允许为空,方便在模型不可用或读模型暂时不可用时直接回退 ask_user。
|
||||
// 2. 真正的定位能力只在 Resolve 内部按需启用。
|
||||
func NewService(reader ports.ScheduleReader, client *llmservice.Client) *Service {
|
||||
return &Service{
|
||||
reader: reader,
|
||||
client: client,
|
||||
clock: time.Now,
|
||||
logger: log.Default(),
|
||||
}
|
||||
}
|
||||
|
||||
// SetClock 允许测试注入稳定时间。
|
||||
func (s *Service) SetClock(clock func() time.Time) {
|
||||
if s != nil && clock != nil {
|
||||
s.clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
// SetLogger 允许外部替换日志器。
|
||||
func (s *Service) SetLogger(logger *log.Logger) {
|
||||
if s != nil && logger != nil {
|
||||
s.logger = logger
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve locates the user's supplement onto one schedule_event inside the
// current rolling window.
//
// Input/output semantics:
//  1. On success it returns action=select_candidate with
//     target_type=schedule_event and a verifiable target_id.
//  2. On failure it never guesses; it uniformly returns action=ask_user.
//  3. Only external interruptions such as context cancellation return an
//     error.
func (s *Service) Resolve(ctx context.Context, req Request) (Result, error) {
	if ctx == nil {
		ctx = context.Background()
	}
	if err := ctx.Err(); err != nil {
		return Result{}, err
	}
	if req.UserID <= 0 {
		return Result{}, errors.New("feedback locate user_id 不能为空")
	}

	// Build the same rolling 24h window the dry-run uses.
	now := s.now()
	windowStart := now
	windowEnd := now.Add(24 * time.Hour)

	// Candidate-loading failures and empty windows degrade to ask_user rather
	// than erroring out.
	candidates, err := s.loadCandidates(ctx, req.UserID, windowStart, windowEnd, now)
	if err != nil {
		return s.buildAskUserResult(req, "读取滚动窗口日程失败: "+err.Error()), nil
	}
	if len(candidates) == 0 {
		return s.buildAskUserResult(req, "当前滚动窗口内没有可定位的日程块"), nil
	}

	// No LLM client configured: fall back instead of failing.
	if s == nil || s.client == nil {
		return s.buildAskUserResult(req, "模型暂不可用"), nil
	}

	userPrompt, err := buildUserPrompt(buildPromptInput(
		req,
		now.In(time.Local).Format(time.RFC3339),
		windowStart.In(time.Local).Format(time.RFC3339),
		windowEnd.In(time.Local).Format(time.RFC3339),
		candidates,
	))
	if err != nil {
		return s.buildAskUserResult(req, "定位 prompt 构造失败"), nil
	}

	messages := llmservice.BuildSystemUserMessages(strings.TrimSpace(locateSystemPrompt), nil, userPrompt)
	resp, rawResult, err := llmservice.GenerateJSON[llmResponse](
		ctx,
		s.client,
		messages,
		llmservice.GenerateOptions{
			Temperature: 0.1,
			MaxTokens:   locateMaxTokens,
			Thinking:    llmservice.ThinkingModeDisabled,
			Metadata: map[string]any{
				"stage":           "active_scheduler_feedback_locate",
				"candidate_count": len(candidates),
			},
		},
	)
	if err != nil {
		// Model failure: log the truncated raw output and degrade to ask_user.
		if s.logger != nil {
			s.logger.Printf("[WARN] active scheduler feedback locate failed: err=%v raw=%s", err, truncateRaw(rawResult))
		}
		return s.buildAskUserResult(req, "模型定位失败"), nil
	}

	// convertResponse validates the verdict; fallbackUsed means the verdict
	// was rejected and we answered with ask_user instead.
	result, fallbackUsed := s.convertResponse(req, resp, candidates)
	if fallbackUsed && s.logger != nil {
		selectedID := 0
		action := ""
		targetType := ""
		if resp != nil {
			selectedID = resp.TargetID
			action = strings.TrimSpace(resp.Action)
			targetType = strings.TrimSpace(resp.TargetType)
		}
		s.logger.Printf("[WARN] active scheduler feedback locate fallback: action=%q target_type=%q target_id=%d", action, targetType, selectedID)
	}
	return result, nil
}
|
||||
|
||||
// convertResponse validates the model's verdict against the backend
// candidate list and converts it into a Result.
//
// The second return value reports whether the ask_user fallback was used:
// true for a nil response, an unrecognized action, a wrong target type, a
// non-positive target_id, or an id not present in candidates.
func (s *Service) convertResponse(req Request, resp *llmResponse, candidates []eventCandidate) (Result, bool) {
	if resp == nil {
		return s.buildAskUserResult(req, "模型返回空结果"), true
	}

	candidateMap := make(map[int]eventCandidate, len(candidates))
	for _, item := range candidates {
		candidateMap[item.TargetID] = item
	}

	action := normalizeAction(resp.Action)
	targetType := strings.TrimSpace(resp.TargetType)
	targetID := resp.TargetID
	reason := strings.TrimSpace(resp.Reason)
	askUserQuestion := strings.TrimSpace(resp.AskUserQuestion)

	// Accept the selection only when every constraint holds AND the id maps
	// back to a backend-generated candidate — never trust a fabricated id.
	if action == ActionSelectCandidate &&
		strings.EqualFold(targetType, TargetTypeScheduleEvent) &&
		targetID > 0 {
		if _, ok := candidateMap[targetID]; ok {
			return Result{
				Action:          ActionSelectCandidate,
				TargetType:      TargetTypeScheduleEvent,
				TargetID:        targetID,
				Reason:          reason,
				AskUserQuestion: "",
			}, false
		}
	}

	// Fallback question priority: the model's own question, then one derived
	// from missing_info, then the previously pending question.
	question := firstNonEmptyString(
		askUserQuestion,
		BuildAskUserQuestion(req.MissingInfo),
		req.PendingQuestion,
	)
	return Result{
		Action:          ActionAskUser,
		TargetType:      TargetTypeScheduleEvent,
		TargetID:        0,
		Reason:          reason,
		AskUserQuestion: question,
	}, true
}
|
||||
|
||||
func (s *Service) buildAskUserResult(req Request, reason string) Result {
|
||||
return Result{
|
||||
Action: ActionAskUser,
|
||||
TargetType: TargetTypeScheduleEvent,
|
||||
TargetID: 0,
|
||||
Reason: strings.TrimSpace(reason),
|
||||
AskUserQuestion: firstNonEmptyString(BuildAskUserQuestion(req.MissingInfo), req.PendingQuestion),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Service) loadCandidates(ctx context.Context, userID int, windowStart time.Time, windowEnd time.Time, now time.Time) ([]eventCandidate, error) {
|
||||
if s == nil || s.reader == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
facts, err := s.reader.GetScheduleFactsByWindow(ctx, ports.ScheduleWindowRequest{
|
||||
UserID: userID,
|
||||
TargetType: string(trigger.TargetTypeScheduleEvent),
|
||||
TargetID: 0,
|
||||
WindowStart: windowStart,
|
||||
WindowEnd: windowEnd,
|
||||
Now: now,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buildEventCandidates(facts.Events), nil
|
||||
}
|
||||
|
||||
func buildEventCandidates(events []ports.ScheduleEventFact) []eventCandidate {
|
||||
if len(events) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
sorted := append([]ports.ScheduleEventFact(nil), events...)
|
||||
sort.SliceStable(sorted, func(i, j int) bool {
|
||||
return eventBefore(sorted[i], sorted[j])
|
||||
})
|
||||
|
||||
candidates := make([]eventCandidate, 0, len(sorted))
|
||||
for _, item := range sorted {
|
||||
candidates = append(candidates, eventCandidate{
|
||||
TargetID: item.ID,
|
||||
Title: strings.TrimSpace(item.Title),
|
||||
SourceType: strings.TrimSpace(item.SourceType),
|
||||
RelatedID: item.RelID,
|
||||
SlotSummary: summarizeSlots(item.Slots),
|
||||
})
|
||||
}
|
||||
return candidates
|
||||
}
|
||||
|
||||
func eventBefore(left, right ports.ScheduleEventFact) bool {
|
||||
leftStart := firstSlotStart(left.Slots)
|
||||
rightStart := firstSlotStart(right.Slots)
|
||||
if !leftStart.IsZero() && !rightStart.IsZero() && !leftStart.Equal(rightStart) {
|
||||
return leftStart.Before(rightStart)
|
||||
}
|
||||
if left.ID != right.ID {
|
||||
return left.ID < right.ID
|
||||
}
|
||||
return strings.TrimSpace(left.Title) < strings.TrimSpace(right.Title)
|
||||
}
|
||||
|
||||
func firstSlotStart(slots []ports.Slot) time.Time {
|
||||
if len(slots) == 0 {
|
||||
return time.Time{}
|
||||
}
|
||||
sorted := append([]ports.Slot(nil), slots...)
|
||||
sort.SliceStable(sorted, func(i, j int) bool {
|
||||
return slotBefore(sorted[i], sorted[j])
|
||||
})
|
||||
return sorted[0].StartAt
|
||||
}
|
||||
|
||||
func slotBefore(left, right ports.Slot) bool {
|
||||
if !left.StartAt.IsZero() && !right.StartAt.IsZero() && !left.StartAt.Equal(right.StartAt) {
|
||||
return left.StartAt.Before(right.StartAt)
|
||||
}
|
||||
if left.Week != right.Week {
|
||||
return left.Week < right.Week
|
||||
}
|
||||
if left.DayOfWeek != right.DayOfWeek {
|
||||
return left.DayOfWeek < right.DayOfWeek
|
||||
}
|
||||
return left.Section < right.Section
|
||||
}
|
||||
|
||||
func summarizeSlots(slots []ports.Slot) string {
|
||||
if len(slots) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
sorted := append([]ports.Slot(nil), slots...)
|
||||
sort.SliceStable(sorted, func(i, j int) bool {
|
||||
return slotBefore(sorted[i], sorted[j])
|
||||
})
|
||||
|
||||
parts := make([]string, 0, minInt(3, len(sorted)))
|
||||
for idx, slot := range sorted {
|
||||
if idx >= 3 {
|
||||
break
|
||||
}
|
||||
parts = append(parts, summarizeSlot(slot))
|
||||
}
|
||||
if len(sorted) > 3 {
|
||||
parts = append(parts, "...")
|
||||
}
|
||||
return strings.Join(parts, ";")
|
||||
}
|
||||
|
||||
func summarizeSlot(slot ports.Slot) string {
|
||||
if !slot.StartAt.IsZero() && !slot.EndAt.IsZero() {
|
||||
return fmt.Sprintf("%s-%s", slot.StartAt.In(time.Local).Format("01-02 15:04"), slot.EndAt.In(time.Local).Format("15:04"))
|
||||
}
|
||||
return fmt.Sprintf("W%d-D%d-S%d", slot.Week, slot.DayOfWeek, slot.Section)
|
||||
}
|
||||
|
||||
func normalizeAction(raw string) string {
|
||||
switch strings.ToLower(strings.TrimSpace(raw)) {
|
||||
case ActionSelectCandidate:
|
||||
return ActionSelectCandidate
|
||||
case ActionAskUser:
|
||||
return ActionAskUser
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
// firstNonEmptyString returns the first argument that is non-empty after
// trimming whitespace, or "" when none qualifies.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		trimmed := strings.TrimSpace(candidate)
		if trimmed == "" {
			continue
		}
		return trimmed
	}
	return ""
}
|
||||
|
||||
// cloneAndTrimStrings trims every entry, drops empties, removes duplicates
// while preserving first-seen order, and returns nil for an empty input.
func cloneAndTrimStrings(values []string) []string {
	if len(values) == 0 {
		return nil
	}

	seen := make(map[string]struct{}, len(values))
	out := make([]string, 0, len(values))
	for _, raw := range values {
		cleaned := strings.TrimSpace(raw)
		if cleaned == "" {
			continue
		}
		if _, dup := seen[cleaned]; dup {
			continue
		}
		seen[cleaned] = struct{}{}
		out = append(out, cleaned)
	}
	return out
}
|
||||
|
||||
func truncateRaw(raw *llmservice.TextResult) string {
|
||||
if raw == nil {
|
||||
return ""
|
||||
}
|
||||
text := strings.TrimSpace(raw.Text)
|
||||
runes := []rune(text)
|
||||
if len(runes) <= 200 {
|
||||
return text
|
||||
}
|
||||
return string(runes[:200]) + "..."
|
||||
}
|
||||
|
||||
func (s *Service) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
// minInt returns the smaller of two ints.
func minInt(left, right int) int {
	if right < left {
		return right
	}
	return left
}
|
||||
198
backend/services/active_scheduler/core/graph/runner.go
Normal file
198
backend/services/active_scheduler/core/graph/runner.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package graph
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/cloudwego/eino/compose"
|
||||
)
|
||||
|
||||
// GraphName identifies the compiled active-schedule graph.
const GraphName = "active_schedule_graph"

// Node keys inside the graph.
const (
	NodeDryRun = "dry_run"
	NodeSelect = "select"
)

// DryRunData carries the read-only output of the graph's dry-run node.
//
// Responsibility boundaries:
//  1. Holds only what preview construction / the selector needs;
//  2. Contains no persistence state and no notification side effects;
//  3. The graph runner chains it into the subsequent selection step.
type DryRunData struct {
	Context     *schedulercontext.ActiveScheduleContext
	Observation observe.Result
	Candidates  []candidate.Candidate
}

// DryRunFunc is the dry-run entry point the graph depends on.
//
// Responsibility boundaries:
//  1. Only turns a trigger into a dry-run result;
//  2. Performs no selection / preview / notification;
//  3. The service layer adapts the existing DryRunService into it.
type DryRunFunc func(ctx context.Context, trig trigger.ActiveScheduleTrigger) (*DryRunData, error)

// Selector is the candidate selector the graph depends on.
//
// Responsibility boundaries:
//  1. Only performs constrained selection and explanation generation over
//     backend candidates;
//  2. Does not build dry-run results;
//  3. Does not persist previews or deliver notifications.
type Selector interface {
	Select(ctx context.Context, req selection.SelectRequest) (selection.Result, error)
}

// RunResult is the graph's final output.
//
// Responsibility boundaries:
//  1. Returns only the dry-run and selection results;
//  2. Contains no persisted preview / notification state;
//  3. The service above still owns transactions, writes and the outbox.
type RunResult struct {
	DryRunData      *DryRunData
	SelectionResult selection.Result
}

// runState is the mutable state threaded through the graph nodes.
type runState struct {
	Trigger         trigger.ActiveScheduleTrigger
	DryRunData      *DryRunData
	SelectionResult selection.Result
}

// Runner chains dry-run and constrained selection into one reusable graph.
type Runner struct {
	dryRun   DryRunFunc
	selector Selector
}
|
||||
|
||||
// NewRunner 创建主动调度 graph runner。
|
||||
func NewRunner(dryRun DryRunFunc, selector Selector) (*Runner, error) {
|
||||
if dryRun == nil {
|
||||
return nil, errors.New("active scheduler dry-run 不能为空")
|
||||
}
|
||||
if selector == nil {
|
||||
return nil, errors.New("active scheduler selector 不能为空")
|
||||
}
|
||||
return &Runner{dryRun: dryRun, selector: selector}, nil
|
||||
}
|
||||
|
||||
// Run executes the dry-run -> select graph for one trigger.
//
// Steps:
//  1. Run dry-run first to gather context, observation and candidates;
//  2. Hand them to the constrained selector;
//  3. Any failing step returns immediately, avoiding half-written previews;
//  4. The service above then decides whether to persist a preview / notify.
func (r *Runner) Run(ctx context.Context, trig trigger.ActiveScheduleTrigger) (*RunResult, error) {
	if r == nil || r.dryRun == nil || r.selector == nil {
		return nil, errors.New("active scheduler graph runner 未初始化")
	}

	state := &runState{Trigger: trig}
	g := compose.NewGraph[*runState, *runState]()

	// Register both lambdas, then wire START -> dry_run -> select -> END.
	if err := g.AddLambdaNode(NodeDryRun, compose.InvokableLambda(r.dryRunNode())); err != nil {
		return nil, err
	}
	if err := g.AddLambdaNode(NodeSelect, compose.InvokableLambda(r.selectNode())); err != nil {
		return nil, err
	}

	if err := g.AddEdge(compose.START, NodeDryRun); err != nil {
		return nil, err
	}
	if err := g.AddEdge(NodeDryRun, NodeSelect); err != nil {
		return nil, err
	}
	if err := g.AddEdge(NodeSelect, compose.END); err != nil {
		return nil, err
	}

	runnable, err := g.Compile(ctx,
		compose.WithGraphName(GraphName),
		compose.WithMaxRunSteps(8),
		compose.WithNodeTriggerMode(compose.AnyPredecessor),
	)
	if err != nil {
		return nil, err
	}

	out, err := runnable.Invoke(ctx, state)
	if err != nil {
		return nil, err
	}
	if out == nil {
		return nil, errors.New("active scheduler graph 返回空状态")
	}
	return &RunResult{
		DryRunData:      out.DryRunData,
		SelectionResult: out.SelectionResult,
	}, nil
}
|
||||
|
||||
func (r *Runner) dryRunNode() func(context.Context, *runState) (*runState, error) {
|
||||
return func(ctx context.Context, state *runState) (*runState, error) {
|
||||
if state == nil {
|
||||
return nil, errors.New("active scheduler graph state 不能为空")
|
||||
}
|
||||
if r == nil || r.dryRun == nil {
|
||||
return nil, errors.New("active scheduler dry-run 不能为空")
|
||||
}
|
||||
|
||||
// 1. 先跑 dry-run,汇集上下文、观测结果和候选,避免 selection 直接依赖数据库。
|
||||
// 2. dry-run 出错时直接返回,不进入后续选择。
|
||||
result, err := r.dryRun(ctx, state.Trigger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.DryRunData = result
|
||||
return state, nil
|
||||
}
|
||||
}
|
||||
|
||||
// selectNode wraps the constrained selector as the graph's second lambda; it
// requires the dry-run result to already be on the shared state.
func (r *Runner) selectNode() func(context.Context, *runState) (*runState, error) {
	return func(ctx context.Context, state *runState) (*runState, error) {
		if state == nil {
			return nil, errors.New("active scheduler graph state 不能为空")
		}
		if state.DryRunData == nil {
			return nil, errors.New("active scheduler graph 缺少 dry-run 结果")
		}
		if r == nil || r.selector == nil {
			return nil, errors.New("active scheduler selector 不能为空")
		}

		// 1. With no candidates, skip the selector and return an empty
		//    selection result; the layer above decides whether a preview is
		//    still written.
		// 2. With candidates, run the constrained LLM selection so the model
		//    only ever sees the backend-generated candidate view.
		if len(state.DryRunData.Candidates) == 0 {
			state.SelectionResult = selection.Result{}
			return state, nil
		}

		result, err := r.selector.Select(ctx, selection.SelectRequest{
			ActiveContext: state.DryRunData.Context,
			Observation:   state.DryRunData.Observation,
			Candidates:    state.DryRunData.Candidates,
		})
		if err != nil {
			return nil, err
		}
		state.SelectionResult = result
		return state, nil
	}
}
|
||||
|
||||
func (r *Runner) String() string {
|
||||
if r == nil {
|
||||
return "active_scheduler_graph(nil)"
|
||||
}
|
||||
return fmt.Sprintf("active_scheduler_graph(dry_run=%t, selector=%t)", r.dryRun != nil, r.selector != nil)
|
||||
}
|
||||
269
backend/services/active_scheduler/core/job/scanner.go
Normal file
269
backend/services/active_scheduler/core/job/scanner.go
Normal file
@@ -0,0 +1,269 @@
|
||||
package job
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
const (
	// defaultScanLimit caps how many due jobs one scan round processes.
	defaultScanLimit = 50
)

// Scanner scans due active_schedule_jobs and emits formal triggers.
//
// Responsibility boundaries:
//  1. Only due job -> trigger; it runs no dry-run, writes no preview, sends
//     no notification;
//  2. Each scan must re-read task and schedule truth so stale jobs do not
//     fire;
//  3. Completed / already-scheduled / no-longer-eligible jobs only get a
//     status update, never a physical delete.
type Scanner struct {
	activeDAO      *dao.ActiveScheduleDAO
	taskReader     ports.TaskReader
	scheduleReader ports.ScheduleReader
	triggerService *activesvc.TriggerService
	clock          func() time.Time
	limit          int
	scanEvery      time.Duration
}

// ScannerOptions tunes the scanner; zero values fall back to the defaults
// applied in NewScanner (limit=50, scan every minute, clock=time.Now).
type ScannerOptions struct {
	Limit     int
	ScanEvery time.Duration
	Clock     func() time.Time
}

// ScanResult summarizes the outcome of one scan round.
type ScanResult struct {
	Scanned   int
	Triggered int
	Skipped   int
	Failed    int
}
|
||||
|
||||
func NewScanner(activeDAO *dao.ActiveScheduleDAO, readers ports.Readers, triggerService *activesvc.TriggerService, options ScannerOptions) (*Scanner, error) {
|
||||
if activeDAO == nil {
|
||||
return nil, errors.New("active schedule dao 不能为空")
|
||||
}
|
||||
if readers.TaskReader == nil {
|
||||
return nil, errors.New("TaskReader 不能为空")
|
||||
}
|
||||
if readers.ScheduleReader == nil {
|
||||
return nil, errors.New("ScheduleReader 不能为空")
|
||||
}
|
||||
if triggerService == nil {
|
||||
return nil, errors.New("trigger service 不能为空")
|
||||
}
|
||||
limit := options.Limit
|
||||
if limit <= 0 {
|
||||
limit = defaultScanLimit
|
||||
}
|
||||
scanEvery := options.ScanEvery
|
||||
if scanEvery <= 0 {
|
||||
scanEvery = time.Minute
|
||||
}
|
||||
clock := options.Clock
|
||||
if clock == nil {
|
||||
clock = time.Now
|
||||
}
|
||||
return &Scanner{
|
||||
activeDAO: activeDAO,
|
||||
taskReader: readers.TaskReader,
|
||||
scheduleReader: readers.ScheduleReader,
|
||||
triggerService: triggerService,
|
||||
clock: clock,
|
||||
limit: limit,
|
||||
scanEvery: scanEvery,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start 启动 due job 周期扫描。
|
||||
//
|
||||
// 说明:
|
||||
// 1. worker/all 模式调用;api 模式不启动,避免 API 进程承担后台职责;
|
||||
// 2. 每轮扫描失败只记录日志,下一轮继续;
|
||||
// 3. ctx 取消后 goroutine 自然退出。
|
||||
func (s *Scanner) Start(ctx context.Context) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
ticker := time.NewTicker(s.scanEvery)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
result, err := s.ScanDue(ctx, s.now())
|
||||
if err != nil {
|
||||
log.Printf("主动调度 due job 扫描失败: err=%v", err)
|
||||
continue
|
||||
}
|
||||
if result.Scanned > 0 {
|
||||
log.Printf("主动调度 due job 扫描完成: scanned=%d triggered=%d skipped=%d failed=%d", result.Scanned, result.Triggered, result.Skipped, result.Failed)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// ScanDue 扫描并处理一批到期 job。
|
||||
func (s *Scanner) ScanDue(ctx context.Context, now time.Time) (ScanResult, error) {
|
||||
if s == nil || s.activeDAO == nil {
|
||||
return ScanResult{}, errors.New("scanner 未初始化")
|
||||
}
|
||||
jobs, err := s.activeDAO.ListDueJobs(ctx, now, s.limit)
|
||||
if err != nil {
|
||||
return ScanResult{}, err
|
||||
}
|
||||
result := ScanResult{Scanned: len(jobs)}
|
||||
for _, item := range jobs {
|
||||
handled, handleErr := s.processJob(ctx, item, now)
|
||||
switch {
|
||||
case handleErr != nil:
|
||||
result.Failed++
|
||||
log.Printf("主动调度 due job 处理失败: job_id=%s err=%v", item.ID, handleErr)
|
||||
case handled == model.ActiveScheduleJobStatusTriggered:
|
||||
result.Triggered++
|
||||
default:
|
||||
result.Skipped++
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (s *Scanner) processJob(ctx context.Context, item model.ActiveScheduleJob, now time.Time) (string, error) {
|
||||
task, found, err := s.taskReader.GetTaskForActiveSchedule(ctx, ports.TaskRequest{
|
||||
UserID: item.UserID,
|
||||
TaskID: item.TaskID,
|
||||
Now: now,
|
||||
})
|
||||
if err != nil {
|
||||
_ = s.markJobFailed(ctx, item.ID, "task_read_failed", err, now)
|
||||
return "", err
|
||||
}
|
||||
if !found {
|
||||
return model.ActiveScheduleJobStatusSkipped, s.markJobSkipped(ctx, item.ID, model.ActiveScheduleJobStatusSkipped, "task_not_found", now)
|
||||
}
|
||||
if task.IsCompleted {
|
||||
return model.ActiveScheduleJobStatusCanceled, s.markJobSkipped(ctx, item.ID, model.ActiveScheduleJobStatusCanceled, "task_completed", now)
|
||||
}
|
||||
if task.UrgencyThresholdAt == nil {
|
||||
// 1. 到期扫描必须重读 task 真值。
|
||||
// 2. 若上游已经移除了 urgency_threshold_at,说明这条 due job 已经不再具备触发前提。
|
||||
// 3. 这里直接收敛为 canceled,避免继续错误地产生 trigger。
|
||||
return model.ActiveScheduleJobStatusCanceled, s.markJobSkipped(ctx, item.ID, model.ActiveScheduleJobStatusCanceled, "task_not_schedulable", now)
|
||||
}
|
||||
if task.UrgencyThresholdAt != nil && task.UrgencyThresholdAt.After(now) {
|
||||
return model.ActiveScheduleJobStatusPending, s.activeDAO.UpdateJobFields(ctx, item.ID, map[string]any{
|
||||
"trigger_at": *task.UrgencyThresholdAt,
|
||||
"last_error_code": "threshold_moved_future",
|
||||
"last_scanned_at": &now,
|
||||
})
|
||||
}
|
||||
if task.Priority != 1 && task.Priority != 2 {
|
||||
return model.ActiveScheduleJobStatusSkipped, s.markJobSkipped(ctx, item.ID, model.ActiveScheduleJobStatusSkipped, "task_not_important", now)
|
||||
}
|
||||
alreadyScheduled, err := s.isTaskAlreadyScheduled(ctx, item.UserID, item.TaskID, now)
|
||||
if err != nil {
|
||||
_ = s.markJobFailed(ctx, item.ID, "schedule_read_failed", err, now)
|
||||
return "", err
|
||||
}
|
||||
if alreadyScheduled {
|
||||
return model.ActiveScheduleJobStatusSkipped, s.markJobSkipped(ctx, item.ID, model.ActiveScheduleJobStatusSkipped, "task_already_scheduled", now)
|
||||
}
|
||||
|
||||
payload := struct {
|
||||
JobID string `json:"job_id"`
|
||||
UrgencyThresholdAt time.Time `json:"urgency_threshold_at"`
|
||||
}{
|
||||
JobID: item.ID,
|
||||
UrgencyThresholdAt: item.TriggerAt,
|
||||
}
|
||||
rawPayload, _ := json.Marshal(payload)
|
||||
jobID := item.ID
|
||||
resp, err := s.triggerService.CreateAndPublish(ctx, activesvc.TriggerRequest{
|
||||
UserID: item.UserID,
|
||||
TriggerType: trigger.TriggerTypeImportantUrgentTask,
|
||||
Source: trigger.SourceWorkerDueJob,
|
||||
TargetType: trigger.TargetTypeTaskPool,
|
||||
TargetID: item.TaskID,
|
||||
DedupeKey: item.DedupeKey,
|
||||
RequestedAt: now,
|
||||
Payload: rawPayload,
|
||||
JobID: &jobID,
|
||||
TraceID: firstNonEmpty(item.TraceID, fmt.Sprintf("trace_active_job_%s", item.ID)),
|
||||
})
|
||||
if err != nil {
|
||||
_ = s.markJobFailed(ctx, item.ID, "trigger_publish_failed", err, now)
|
||||
return "", err
|
||||
}
|
||||
return model.ActiveScheduleJobStatusTriggered, s.activeDAO.UpdateJobFields(ctx, item.ID, map[string]any{
|
||||
"status": model.ActiveScheduleJobStatusTriggered,
|
||||
"last_trigger_id": &resp.TriggerID,
|
||||
"last_error_code": nil,
|
||||
"last_error": nil,
|
||||
"last_scanned_at": &now,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Scanner) isTaskAlreadyScheduled(ctx context.Context, userID int, taskID int, now time.Time) (bool, error) {
|
||||
facts, err := s.scheduleReader.GetScheduleFactsByWindow(ctx, ports.ScheduleWindowRequest{
|
||||
UserID: userID,
|
||||
TargetType: string(trigger.TargetTypeTaskPool),
|
||||
TargetID: taskID,
|
||||
WindowStart: now,
|
||||
WindowEnd: now.Add(24 * time.Hour),
|
||||
Now: now,
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return facts.TargetAlreadyScheduled, nil
|
||||
}
|
||||
|
||||
func (s *Scanner) markJobSkipped(ctx context.Context, jobID string, status string, code string, now time.Time) error {
|
||||
return s.activeDAO.UpdateJobFields(ctx, jobID, map[string]any{
|
||||
"status": status,
|
||||
"last_error_code": code,
|
||||
"last_error": nil,
|
||||
"last_scanned_at": &now,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Scanner) markJobFailed(ctx context.Context, jobID string, code string, err error, now time.Time) error {
|
||||
message := ""
|
||||
if err != nil {
|
||||
message = err.Error()
|
||||
}
|
||||
return s.activeDAO.UpdateJobFields(ctx, jobID, map[string]any{
|
||||
"status": model.ActiveScheduleJobStatusFailed,
|
||||
"last_error_code": code,
|
||||
"last_error": &message,
|
||||
"last_scanned_at": &now,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *Scanner) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
// firstNonEmpty returns the first non-empty string among values, or "" when none exists.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if len(candidate) > 0 {
			return candidate
		}
	}
	return ""
}
|
||||
293
backend/services/active_scheduler/core/observe/observe.go
Normal file
293
backend/services/active_scheduler/core/observe/observe.go
Normal file
@@ -0,0 +1,293 @@
|
||||
package observe
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
// DecisionAction enumerates the terminal actions the observer can recommend.
type DecisionAction string

const (
	DecisionActionClose           DecisionAction = "close"
	DecisionActionAskUser         DecisionAction = "ask_user"
	DecisionActionNotifyOnly      DecisionAction = "notify_only"
	DecisionActionSelectCandidate DecisionAction = "select_candidate"
)

// IssueCode identifies one deterministic finding produced by issue detection.
type IssueCode string

const (
	IssueTargetCompleted                IssueCode = "target_completed"
	IssueTargetAlreadyScheduled         IssueCode = "target_already_scheduled"
	IssueNoValidTimeWindow              IssueCode = "no_valid_time_window"
	IssueCapacityInsufficient           IssueCode = "capacity_insufficient"
	IssueNoFreeSlot                     IssueCode = "no_free_slot"
	IssueFeedbackTargetUnknown          IssueCode = "feedback_target_unknown"
	IssueNeedMakeupBlock                IssueCode = "need_makeup_block"
	IssueCanAddTaskPoolToSchedule       IssueCode = "can_add_task_pool_to_schedule"
	IssueCanCompressWithNextDynamicTask IssueCode = "can_compress_with_next_dynamic_task"
)

// Metrics are the factual indicators emitted by the active observation stage.
type Metrics struct {
	Target   TargetMetrics
	Window   WindowMetrics
	Feedback FeedbackMetrics
	Risk     RiskMetrics
}

// TargetMetrics describes the trigger target's state.
type TargetMetrics struct {
	Completed             bool
	AlreadyScheduled      bool
	DeadlineAlreadyPassed bool
	MinutesToDeadline     int // minutes from effective-now to the deadline; 0 when no deadline
	EstimatedSections     int // clamped to at least 1 by buildMetrics
}

// WindowMetrics describes slot availability inside the rolling window.
type WindowMetrics struct {
	TotalSlots                int
	FreeSlots                 int
	OccupiedSlots             int
	UsableSlotsBeforeDeadline int
	CapacityGap               int // EstimatedSections - UsableSlotsBeforeDeadline; >0 means insufficient
}

// FeedbackMetrics describes the unfinished-feedback signal, when present.
type FeedbackMetrics struct {
	HasFeedback              bool
	FeedbackTargetKnown      bool
	UnfinishedElapsedMinutes int
}

// RiskMetrics describes the blast radius of acting on the trigger.
type RiskMetrics struct {
	ConflictCount      int
	AffectedEventCount int
	AffectedTaskCount  int
	RequiresReorder    bool
}

// Issue is one deterministic finding with its supporting evidence.
type Issue struct {
	IssueID              string
	Code                 IssueCode
	Severity             string
	TargetType           string
	TargetID             int
	Reason               string
	Evidence             map[string]string
	CanGenerateCandidate bool
}

// Decision is the observer's (provisional or final) recommended action.
type Decision struct {
	Action               DecisionAction
	ReasonCode           string
	PrimaryIssueCode     IssueCode
	ShouldNotify         bool
	ShouldWritePreview   bool
	LLMSelectionRequired bool
	FallbackCandidateID  string
}

// Result bundles the full observation output: metrics, issues, decision and trace notes.
type Result struct {
	Metrics  Metrics
	Issues   []Issue
	Decision Decision
	Trace    []string
}
|
||||
|
||||
// Analyzer turns an ActiveScheduleContext into a deterministic observation result.
//
// Responsibility boundaries:
//  1. Produces only metrics / issues / a provisional decision.
//  2. Never enumerates candidates and never calls the LLM.
//  3. After candidate generation, FinalizeDecision converges the final action by candidate count.
type Analyzer struct{}

// NewAnalyzer constructs a stateless Analyzer.
func NewAnalyzer() *Analyzer {
	return new(Analyzer)
}
|
||||
|
||||
// Observe 执行主动观测。
|
||||
func (a *Analyzer) Observe(ctx *schedulercontext.ActiveScheduleContext) Result {
|
||||
result := Result{
|
||||
Metrics: buildMetrics(ctx),
|
||||
Trace: []string{
|
||||
"1. 基于上下文构造 metrics,保证后续裁决只依赖结构化事实。",
|
||||
"2. 按触发类型检测 issue,不在观测阶段修改正式日程。",
|
||||
},
|
||||
}
|
||||
result.Issues = detectIssues(ctx, result.Metrics)
|
||||
result.Decision = provisionalDecision(result.Issues)
|
||||
return result
|
||||
}
|
||||
|
||||
// FinalizeDecision 根据候选生成结果收口最终裁决。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只根据后端已生成、已校验的候选收口 decision;
|
||||
// 2. 不改变候选内容;
|
||||
// 3. 候选为空时不能写 preview,必须降级为 ask_user / notify_only / close。
|
||||
func (a *Analyzer) FinalizeDecision(result Result, candidateCount int, fallbackCandidateID string) Result {
|
||||
if len(result.Issues) == 0 {
|
||||
result.Decision = Decision{Action: DecisionActionClose, ReasonCode: "no_issue"}
|
||||
return result
|
||||
}
|
||||
primary := result.Issues[0].Code
|
||||
if hasIssue(result.Issues, IssueTargetCompleted) || hasIssue(result.Issues, IssueTargetAlreadyScheduled) {
|
||||
result.Decision = Decision{Action: DecisionActionClose, ReasonCode: string(primary), PrimaryIssueCode: primary}
|
||||
return result
|
||||
}
|
||||
if hasIssue(result.Issues, IssueFeedbackTargetUnknown) || hasIssue(result.Issues, IssueNoValidTimeWindow) {
|
||||
result.Decision = Decision{Action: DecisionActionAskUser, ReasonCode: string(primary), PrimaryIssueCode: primary, ShouldNotify: true}
|
||||
return result
|
||||
}
|
||||
if candidateCount > 0 {
|
||||
result.Decision = Decision{
|
||||
Action: DecisionActionSelectCandidate,
|
||||
ReasonCode: "candidate_available",
|
||||
PrimaryIssueCode: primary,
|
||||
ShouldNotify: true,
|
||||
ShouldWritePreview: true,
|
||||
LLMSelectionRequired: true,
|
||||
FallbackCandidateID: fallbackCandidateID,
|
||||
}
|
||||
return result
|
||||
}
|
||||
result.Decision = Decision{Action: DecisionActionNotifyOnly, ReasonCode: string(primary), PrimaryIssueCode: primary, ShouldNotify: true}
|
||||
return result
|
||||
}
|
||||
|
||||
func buildMetrics(ctx *schedulercontext.ActiveScheduleContext) Metrics {
|
||||
estimated := ctx.Target.EstimatedSections
|
||||
if estimated <= 0 {
|
||||
estimated = 1
|
||||
}
|
||||
usable := countUsableSlots(ctx)
|
||||
deadlinePassed := false
|
||||
minutesToDeadline := 0
|
||||
if ctx.Target.DeadlineAt != nil {
|
||||
deadlinePassed = ctx.Target.DeadlineAt.Before(ctx.Now.EffectiveNow)
|
||||
minutesToDeadline = int(ctx.Target.DeadlineAt.Sub(ctx.Now.EffectiveNow).Minutes())
|
||||
}
|
||||
return Metrics{
|
||||
Target: TargetMetrics{
|
||||
Completed: ctx.DerivedFacts.TargetCompleted,
|
||||
AlreadyScheduled: ctx.DerivedFacts.TargetAlreadyScheduled,
|
||||
DeadlineAlreadyPassed: deadlinePassed,
|
||||
MinutesToDeadline: minutesToDeadline,
|
||||
EstimatedSections: estimated,
|
||||
},
|
||||
Window: WindowMetrics{
|
||||
TotalSlots: len(ctx.ScheduleFacts.FreeSlots) + len(ctx.ScheduleFacts.OccupiedSlots),
|
||||
FreeSlots: len(ctx.ScheduleFacts.FreeSlots),
|
||||
OccupiedSlots: len(ctx.ScheduleFacts.OccupiedSlots),
|
||||
UsableSlotsBeforeDeadline: usable,
|
||||
CapacityGap: estimated - usable,
|
||||
},
|
||||
Feedback: FeedbackMetrics{
|
||||
HasFeedback: ctx.Trigger.TriggerType == trigger.TriggerTypeUnfinishedFeedback && ctx.FeedbackFacts.FeedbackID != "",
|
||||
FeedbackTargetKnown: ctx.FeedbackFacts.TargetKnown,
|
||||
},
|
||||
Risk: RiskMetrics{
|
||||
AffectedEventCount: len(ctx.ScheduleFacts.Events),
|
||||
RequiresReorder: ctx.Trigger.TriggerType == trigger.TriggerTypeUnfinishedFeedback && len(ctx.ScheduleFacts.FreeSlots) == 0,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func countUsableSlots(ctx *schedulercontext.ActiveScheduleContext) int {
|
||||
if ctx.Target.DeadlineAt == nil {
|
||||
return len(ctx.ScheduleFacts.FreeSlots)
|
||||
}
|
||||
count := 0
|
||||
for _, slot := range ctx.ScheduleFacts.FreeSlots {
|
||||
if slot.StartAt.IsZero() || !slot.StartAt.After(*ctx.Target.DeadlineAt) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
func detectIssues(ctx *schedulercontext.ActiveScheduleContext, metrics Metrics) []Issue {
|
||||
switch ctx.Trigger.TriggerType {
|
||||
case trigger.TriggerTypeImportantUrgentTask:
|
||||
return detectImportantUrgentIssues(ctx, metrics)
|
||||
case trigger.TriggerTypeUnfinishedFeedback:
|
||||
return detectUnfinishedFeedbackIssues(ctx, metrics)
|
||||
default:
|
||||
return []Issue{}
|
||||
}
|
||||
}
|
||||
|
||||
func detectImportantUrgentIssues(ctx *schedulercontext.ActiveScheduleContext, metrics Metrics) []Issue {
|
||||
if metrics.Target.Completed {
|
||||
return []Issue{newIssue(IssueTargetCompleted, ctx, "目标任务已完成,主动调度无需继续处理。", false)}
|
||||
}
|
||||
if metrics.Target.AlreadyScheduled {
|
||||
return []Issue{newIssue(IssueTargetAlreadyScheduled, ctx, "目标任务已经进入日程,不能重复加入 task_pool。", false)}
|
||||
}
|
||||
if len(ctx.DerivedFacts.MissingInfo) > 0 {
|
||||
return []Issue{newIssue(IssueNoValidTimeWindow, ctx, "缺少目标任务或时间窗事实,需要用户补充信息。", false)}
|
||||
}
|
||||
if metrics.Window.FreeSlots == 0 {
|
||||
return []Issue{newIssue(IssueNoFreeSlot, ctx, "滚动 24 小时内没有可用节次。", false)}
|
||||
}
|
||||
if metrics.Window.CapacityGap > 0 {
|
||||
return []Issue{newIssue(IssueCapacityInsufficient, ctx, "可用节次不足以完整放入目标任务。", false)}
|
||||
}
|
||||
return []Issue{newIssue(IssueCanAddTaskPoolToSchedule, ctx, "目标任务可加入滚动 24 小时内的空闲节次。", true)}
|
||||
}
|
||||
|
||||
func detectUnfinishedFeedbackIssues(ctx *schedulercontext.ActiveScheduleContext, metrics Metrics) []Issue {
|
||||
if !metrics.Feedback.HasFeedback || !metrics.Feedback.FeedbackTargetKnown {
|
||||
return []Issue{newIssue(IssueFeedbackTargetUnknown, ctx, "无法确定用户反馈的未完成日程块,需要进一步确认。", false)}
|
||||
}
|
||||
if metrics.Window.FreeSlots == 0 {
|
||||
return []Issue{newIssue(IssueNoFreeSlot, ctx, "反馈目标已定位,但滚动 24 小时内没有补做空位。", false)}
|
||||
}
|
||||
return []Issue{newIssue(IssueNeedMakeupBlock, ctx, "反馈目标已定位,可生成新增补做块候选。", true)}
|
||||
}
|
||||
|
||||
func newIssue(code IssueCode, ctx *schedulercontext.ActiveScheduleContext, reason string, canGenerate bool) Issue {
|
||||
return Issue{
|
||||
IssueID: string(code) + ":1",
|
||||
Code: code,
|
||||
Severity: issueSeverity(code),
|
||||
TargetType: string(ctx.Trigger.TargetType),
|
||||
TargetID: ctx.Trigger.TargetID,
|
||||
Reason: reason,
|
||||
Evidence: map[string]string{
|
||||
"trigger_type": string(ctx.Trigger.TriggerType),
|
||||
"window_start": ctx.Window.StartAt.Format(time.RFC3339),
|
||||
"window_end": ctx.Window.EndAt.Format(time.RFC3339),
|
||||
},
|
||||
CanGenerateCandidate: canGenerate,
|
||||
}
|
||||
}
|
||||
|
||||
func issueSeverity(code IssueCode) string {
|
||||
switch code {
|
||||
case IssueTargetCompleted, IssueTargetAlreadyScheduled:
|
||||
return "info"
|
||||
case IssueFeedbackTargetUnknown, IssueNoValidTimeWindow:
|
||||
return "warning"
|
||||
default:
|
||||
return "critical"
|
||||
}
|
||||
}
|
||||
|
||||
func provisionalDecision(issues []Issue) Decision {
|
||||
if len(issues) == 0 {
|
||||
return Decision{Action: DecisionActionClose, ReasonCode: "no_issue"}
|
||||
}
|
||||
return Decision{Action: DecisionActionNotifyOnly, ReasonCode: "pending_candidates", PrimaryIssueCode: issues[0].Code}
|
||||
}
|
||||
|
||||
func hasIssue(issues []Issue, code IssueCode) bool {
|
||||
for _, issue := range issues {
|
||||
if issue.Code == code {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
132
backend/services/active_scheduler/core/ports/facts.go
Normal file
132
backend/services/active_scheduler/core/ports/facts.go
Normal file
@@ -0,0 +1,132 @@
|
||||
package ports
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Slot is the atomic section-grid coordinate used internally by active scheduling.
//
// Responsibility boundaries:
//  1. Describes only a comparable, preview-writable time cell.
//  2. Not bound to the schedules table model.
//  3. StartAt / EndAt may be zero values; sorting then falls back to week/day/section.
type Slot struct {
	Week      int
	DayOfWeek int
	Section   int
	StartAt   time.Time
	EndAt     time.Time
}

// SlotSpan represents one contiguous block of sections.
type SlotSpan struct {
	Start            Slot
	End              Slot
	DurationSections int
}

// TaskFact is the minimal fact snapshot of a task_pool task for active scheduling.
type TaskFact struct {
	ID                 int
	UserID             int
	Title              string
	Priority           int
	IsCompleted        bool
	DeadlineAt         *time.Time
	UrgencyThresholdAt *time.Time
	EstimatedSections  int
}

// ScheduleEventFact is the minimal fact snapshot of a schedule block for active scheduling.
type ScheduleEventFact struct {
	ID             int
	UserID         int
	Title          string
	SourceType     string
	RelID          int
	IsDynamicTask  bool
	IsCompleted    bool
	Slots          []Slot
	TaskClassID    int
	TaskItemID     int
	CanBeShortened bool
}

// ScheduleWindowFacts is the schedule fact snapshot inside a rolling window.
type ScheduleWindowFacts struct {
	Events                 []ScheduleEventFact
	OccupiedSlots          []Slot
	FreeSlots              []Slot
	NextDynamicTask        *ScheduleEventFact
	TargetAlreadyScheduled bool
}

// FeedbackFact is the minimal fact snapshot of an unfinished_feedback signal.
type FeedbackFact struct {
	FeedbackID       string
	Text             string
	TargetKnown      bool
	TargetEventID    int
	TargetTaskItemID int
	TargetTitle      string
	SubmittedAt      time.Time
}

// TaskRequest is the input of the task reader port.
type TaskRequest struct {
	UserID int
	TaskID int
	Now    time.Time
}

// ScheduleWindowRequest is the input of the schedule window reader port.
type ScheduleWindowRequest struct {
	UserID      int
	TargetType  string
	TargetID    int
	WindowStart time.Time
	WindowEnd   time.Time
	Now         time.Time
}

// FeedbackRequest is the input of the feedback reader port.
type FeedbackRequest struct {
	UserID         int
	FeedbackID     string
	IdempotencyKey string
	TargetType     string
	TargetID       int
}

// TaskReader reads the task_pool facts active scheduling needs.
//
// Responsibility boundaries:
//  1. May be implemented by an adapter that assembles facts from existing services / DAOs.
//  2. The active_scheduler main chain depends only on this port and never imports
//     other domain DAOs directly.
//  3. found=false means the target does not exist or the current user has no access;
//     the observation chain turns this into ask_user.
type TaskReader interface {
	GetTaskForActiveSchedule(ctx context.Context, req TaskRequest) (task TaskFact, found bool, err error)
}

// ScheduleReader reads schedule facts inside the rolling time window.
type ScheduleReader interface {
	GetScheduleFactsByWindow(ctx context.Context, req ScheduleWindowRequest) (ScheduleWindowFacts, error)
}

// FeedbackReader reads user feedback signals.
type FeedbackReader interface {
	GetFeedbackSignal(ctx context.Context, req FeedbackRequest) (feedback FeedbackFact, found bool, err error)
}

// Readers aggregates the external read ports the dry-run main chain depends on.
//
// Responsibility boundaries:
//  1. Aggregates read dependencies only; no ability to write previews / schedules / notifications.
//  2. Lets API, worker, and tests share one dry-run service.
//  3. Service initialization rejects construction when any required port is nil.
type Readers struct {
	TaskReader     TaskReader
	ScheduleReader ScheduleReader
	FeedbackReader FeedbackReader
}
|
||||
358
backend/services/active_scheduler/core/preview/converter.go
Normal file
358
backend/services/active_scheduler/core/preview/converter.go
Normal file
@@ -0,0 +1,358 @@
|
||||
package preview
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
)
|
||||
|
||||
func candidateDTO(item candidate.Candidate) CandidateDTO {
|
||||
return CandidateDTO{
|
||||
CandidateID: item.CandidateID,
|
||||
CandidateType: string(item.CandidateType),
|
||||
Title: item.Title,
|
||||
Summary: item.Summary,
|
||||
Target: CandidateTargetDTO{
|
||||
TargetType: item.Target.TargetType,
|
||||
TargetID: item.Target.TargetID,
|
||||
Title: item.Target.Title,
|
||||
},
|
||||
Changes: changeDTOs(item.CandidateID, item.Changes),
|
||||
BeforeSummary: item.BeforeSummary,
|
||||
AfterSummary: item.AfterSummary,
|
||||
Risk: item.Risk,
|
||||
Score: item.Score,
|
||||
Validation: item.Validation,
|
||||
Source: item.Source,
|
||||
}
|
||||
}
|
||||
|
||||
func changeDTOs(candidateID string, changes []candidate.ChangeItem) []ActiveScheduleChangeItem {
|
||||
result := make([]ActiveScheduleChangeItem, 0, len(changes))
|
||||
for index, change := range changes {
|
||||
var fromSlot *SlotDTO
|
||||
if change.FromSlot != nil {
|
||||
value := slotDTO(*change.FromSlot)
|
||||
fromSlot = &value
|
||||
}
|
||||
var toSlot *SlotSpanDTO
|
||||
if change.ToSlot != nil {
|
||||
value := slotSpanDTO(*change.ToSlot)
|
||||
toSlot = &value
|
||||
}
|
||||
result = append(result, ActiveScheduleChangeItem{
|
||||
ChangeID: fmt.Sprintf("%s:chg_%d", candidateID, index+1),
|
||||
ChangeType: string(change.ChangeType),
|
||||
TargetType: change.TargetType,
|
||||
TargetID: change.TargetID,
|
||||
FromSlot: fromSlot,
|
||||
ToSlot: toSlot,
|
||||
DurationSections: change.DurationSections,
|
||||
AffectedEventIDs: append([]int(nil), change.AffectedEventIDs...),
|
||||
EditedAllowed: change.EditedAllowed,
|
||||
Metadata: copyStringMap(change.Metadata),
|
||||
})
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func contextSummaryDTO(activeContext *schedulercontext.ActiveScheduleContext) ContextSummaryDTO {
|
||||
if activeContext == nil {
|
||||
return ContextSummaryDTO{}
|
||||
}
|
||||
return ContextSummaryDTO{
|
||||
UserID: activeContext.User.UserID,
|
||||
Timezone: activeContext.User.Timezone,
|
||||
TriggerSource: string(activeContext.Trigger.Source),
|
||||
RequestedAt: activeContext.Trigger.RequestedAt,
|
||||
WindowStart: activeContext.Window.StartAt,
|
||||
WindowEnd: activeContext.Window.EndAt,
|
||||
WindowReason: activeContext.Window.WindowReason,
|
||||
TargetType: string(activeContext.Trigger.TargetType),
|
||||
TargetID: activeContext.Trigger.TargetID,
|
||||
TargetTitle: activeContext.Target.Title,
|
||||
MissingInfo: append([]string(nil), activeContext.DerivedFacts.MissingInfo...),
|
||||
TraceSteps: append([]string(nil), activeContext.Trace.BuildSteps...),
|
||||
Warnings: append([]string(nil), activeContext.Trace.Warnings...),
|
||||
}
|
||||
}
|
||||
|
||||
func buildBeforeSummary(activeContext *schedulercontext.ActiveScheduleContext, selected candidate.Candidate, changes []ActiveScheduleChangeItem) SchedulePreviewVersion {
|
||||
version := SchedulePreviewVersion{
|
||||
Title: "调整前",
|
||||
WindowStart: activeContext.Window.StartAt,
|
||||
WindowEnd: activeContext.Window.EndAt,
|
||||
SummaryLines: compactLines(selected.BeforeSummary),
|
||||
}
|
||||
affected := affectedEventSet(changes)
|
||||
for _, event := range activeContext.ScheduleFacts.Events {
|
||||
entry := entryFromEvent(event)
|
||||
if affected[event.ID] || (selected.Target.TargetType == "schedule_event" && selected.Target.TargetID == event.ID) {
|
||||
entry.Status = "affected"
|
||||
}
|
||||
version.Entries = append(version.Entries, entry)
|
||||
}
|
||||
return version
|
||||
}
|
||||
|
||||
func buildAfterSummary(before SchedulePreviewVersion, selected candidate.Candidate, changes []ActiveScheduleChangeItem) SchedulePreviewVersion {
|
||||
after := SchedulePreviewVersion{
|
||||
Title: "调整后",
|
||||
WindowStart: before.WindowStart,
|
||||
WindowEnd: before.WindowEnd,
|
||||
Entries: append([]SchedulePreviewEntry(nil), before.Entries...),
|
||||
SummaryLines: compactLines(selected.AfterSummary),
|
||||
}
|
||||
for _, change := range changes {
|
||||
// 1. 只把会产生可视化新块的 change 追加到 after;ask_user / none 不伪造正式日程。
|
||||
// 2. 该 entry 仅用于展示和后续 confirm 校验输入,不代表已经写入 schedule_events / schedules。
|
||||
if change.ToSlot == nil || (change.ChangeType != string(candidate.ChangeTypeAdd) && change.ChangeType != string(candidate.ChangeTypeCreateMakeup)) {
|
||||
continue
|
||||
}
|
||||
after.Entries = append(after.Entries, SchedulePreviewEntry{
|
||||
EntryID: "preview:" + change.ChangeID,
|
||||
SourceType: change.TargetType,
|
||||
SourceID: change.TargetID,
|
||||
Title: selected.Target.Title,
|
||||
StartAt: change.ToSlot.Start.StartAt,
|
||||
EndAt: change.ToSlot.End.EndAt,
|
||||
Week: change.ToSlot.Start.Week,
|
||||
DayOfWeek: change.ToSlot.Start.DayOfWeek,
|
||||
SectionFrom: change.ToSlot.Start.Section,
|
||||
SectionTo: change.ToSlot.End.Section,
|
||||
Status: "added",
|
||||
Editable: change.EditedAllowed,
|
||||
})
|
||||
}
|
||||
return after
|
||||
}
|
||||
|
||||
func entryFromEvent(event ports.ScheduleEventFact) SchedulePreviewEntry {
|
||||
slots := append([]ports.Slot(nil), event.Slots...)
|
||||
sort.Slice(slots, func(i, j int) bool {
|
||||
if !slots[i].StartAt.IsZero() && !slots[j].StartAt.IsZero() && !slots[i].StartAt.Equal(slots[j].StartAt) {
|
||||
return slots[i].StartAt.Before(slots[j].StartAt)
|
||||
}
|
||||
if slots[i].Week != slots[j].Week {
|
||||
return slots[i].Week < slots[j].Week
|
||||
}
|
||||
if slots[i].DayOfWeek != slots[j].DayOfWeek {
|
||||
return slots[i].DayOfWeek < slots[j].DayOfWeek
|
||||
}
|
||||
return slots[i].Section < slots[j].Section
|
||||
})
|
||||
|
||||
entry := SchedulePreviewEntry{
|
||||
EntryID: fmt.Sprintf("%s:%d", event.SourceType, event.ID),
|
||||
SourceType: event.SourceType,
|
||||
SourceID: event.ID,
|
||||
Title: event.Title,
|
||||
Status: "unchanged",
|
||||
Editable: event.IsDynamicTask,
|
||||
}
|
||||
if len(slots) == 0 {
|
||||
return entry
|
||||
}
|
||||
first := slots[0]
|
||||
last := slots[len(slots)-1]
|
||||
entry.StartAt = first.StartAt
|
||||
entry.EndAt = last.EndAt
|
||||
entry.Week = first.Week
|
||||
entry.DayOfWeek = first.DayOfWeek
|
||||
entry.SectionFrom = first.Section
|
||||
entry.SectionTo = last.Section
|
||||
return entry
|
||||
}
|
||||
|
||||
func riskDTO(selected candidate.Candidate, observation observe.Result, changes []ActiveScheduleChangeItem, fallbackUsed bool) RiskDTO {
|
||||
affectedIDs := make([]int, 0)
|
||||
seen := make(map[int]bool)
|
||||
for _, change := range changes {
|
||||
for _, id := range change.AffectedEventIDs {
|
||||
if !seen[id] {
|
||||
seen[id] = true
|
||||
affectedIDs = append(affectedIDs, id)
|
||||
}
|
||||
}
|
||||
}
|
||||
level := "low"
|
||||
if !selected.Validation.Valid {
|
||||
level = "high"
|
||||
} else if observation.Metrics.Risk.RequiresReorder || len(affectedIDs) > 0 {
|
||||
level = "medium"
|
||||
}
|
||||
return RiskDTO{
|
||||
Level: level,
|
||||
Summary: selected.Risk,
|
||||
Validation: selected.Validation,
|
||||
RiskMetrics: observation.Metrics.Risk,
|
||||
AffectedIDs: affectedIDs,
|
||||
RequiresLLM: observation.Decision.LLMSelectionRequired,
|
||||
FallbackUsed: fallbackUsed,
|
||||
}
|
||||
}
|
||||
|
||||
func buildBaseVersion(activeContext *schedulercontext.ActiveScheduleContext, changes []ActiveScheduleChangeItem) string {
|
||||
type eventVersion struct {
|
||||
ID int `json:"id"`
|
||||
Slots []SlotDTO `json:"slots"`
|
||||
}
|
||||
events := make([]eventVersion, 0, len(activeContext.ScheduleFacts.Events))
|
||||
for _, event := range activeContext.ScheduleFacts.Events {
|
||||
slots := make([]SlotDTO, 0, len(event.Slots))
|
||||
for _, slot := range event.Slots {
|
||||
slots = append(slots, slotDTO(slot))
|
||||
}
|
||||
events = append(events, eventVersion{ID: event.ID, Slots: slots})
|
||||
}
|
||||
payload := struct {
|
||||
UserID int `json:"user_id"`
|
||||
TargetType string `json:"target_type"`
|
||||
TargetID int `json:"target_id"`
|
||||
WindowStart time.Time `json:"window_start"`
|
||||
WindowEnd time.Time `json:"window_end"`
|
||||
Events []eventVersion `json:"events"`
|
||||
Changes []ActiveScheduleChangeItem `json:"changes"`
|
||||
}{
|
||||
UserID: activeContext.User.UserID,
|
||||
TargetType: string(activeContext.Trigger.TargetType),
|
||||
TargetID: activeContext.Trigger.TargetID,
|
||||
WindowStart: activeContext.Window.StartAt,
|
||||
WindowEnd: activeContext.Window.EndAt,
|
||||
Events: events,
|
||||
Changes: changes,
|
||||
}
|
||||
raw, _ := json.Marshal(payload)
|
||||
sum := sha256.Sum256(raw)
|
||||
return "sha256:" + hex.EncodeToString(sum[:])
|
||||
}
|
||||
|
||||
// detailFromModel restores one persisted preview row into the detail DTO the
// API returns. Every JSON snapshot column is decoded strictly: the first
// decode failure aborts the whole conversion so a corrupted snapshot is never
// partially rendered to the user. `now` is only used to compute the derived
// expired / can_confirm / can_ignore flags; nothing is written back.
func detailFromModel(row *model.ActiveSchedulePreview, now time.Time) (ActiveSchedulePreviewDetail, error) {
	selected, err := decodeJSONField(row.SelectedCandidateJSON, CandidateDTO{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	candidates, err := decodeJSONField(row.CandidatesJSON, []CandidateDTO{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	decision, err := decodeJSONField(row.DecisionJSON, observe.Decision{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	metrics, err := decodeJSONField(row.MetricsJSON, observe.Metrics{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	issues, err := decodeJSONField(row.IssuesJSON, []observe.Issue{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	contextSummary, err := decodeJSONField(row.ContextSummaryJSON, ContextSummaryDTO{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	before, err := decodeJSONField(row.BeforeSummaryJSON, SchedulePreviewVersion{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	changes, err := decodeJSONField(row.PreviewChangesJSON, []ActiveScheduleChangeItem{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	after, err := decodeJSONField(row.AfterSummaryJSON, SchedulePreviewVersion{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}
	risk, err := decodeJSONField(row.RiskJSON, RiskDTO{})
	if err != nil {
		return ActiveSchedulePreviewDetail{}, err
	}

	// A preview expiring exactly at `now` counts as expired (!After covers equality).
	expired := !row.ExpiresAt.After(now)
	// Both user actions require a ready, not-yet-applied, unexpired snapshot.
	canConfirm := row.Status == model.ActiveSchedulePreviewStatusReady && row.ApplyStatus == model.ActiveScheduleApplyStatusNone && !expired
	canIgnore := row.Status == model.ActiveSchedulePreviewStatusReady && row.ApplyStatus == model.ActiveScheduleApplyStatusNone && !expired

	return ActiveSchedulePreviewDetail{
		PreviewID:   row.ID,
		Status:      row.Status,
		ApplyStatus: row.ApplyStatus,
		ExpiresAt:   row.ExpiresAt,
		GeneratedAt: row.GeneratedAt,
		Expired:     expired,
		// Trigger source and requested_at live only in the context snapshot,
		// not in dedicated columns, so they are read from contextSummary.
		Trigger: PreviewTriggerDTO{
			TriggerID:   row.TriggerID,
			TriggerType: row.TriggerType,
			Source:      contextSummary.TriggerSource,
			TargetType:  row.TargetType,
			TargetID:    row.TargetID,
			RequestedAt: contextSummary.RequestedAt,
		},
		Explanation:       row.ExplanationText,
		Notification:      row.NotificationSummary,
		SelectedCandidate: selected,
		Candidates:        candidates,
		Decision:          decision,
		Metrics:           metrics,
		Issues:            issues,
		ContextSummary:    contextSummary,
		Before:            before,
		After:             after,
		Changes:           changes,
		Risk:              risk,
		BaseVersion:       row.BaseVersion,
		CanConfirm:        canConfirm,
		CanIgnore:         canIgnore,
		TraceID:           row.TraceID,
	}, nil
}
|
||||
|
||||
// jsonString serializes value into its compact JSON text form.
func jsonString(value any) (string, error) {
	encoded, marshalErr := json.Marshal(value)
	if marshalErr != nil {
		return "", marshalErr
	}
	return string(encoded), nil
}
|
||||
|
||||
// compactLines trims every input line and drops the ones that end up empty,
// preserving the original order of the survivors.
func compactLines(lines ...string) []string {
	out := make([]string, 0, len(lines))
	for _, raw := range lines {
		if trimmed := strings.TrimSpace(raw); trimmed != "" {
			out = append(out, trimmed)
		}
	}
	return out
}
|
||||
|
||||
func affectedEventSet(changes []ActiveScheduleChangeItem) map[int]bool {
|
||||
result := make(map[int]bool)
|
||||
for _, change := range changes {
|
||||
for _, id := range change.AffectedEventIDs {
|
||||
result[id] = true
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// copyStringMap returns a detached shallow copy of input; an empty or nil
// input yields nil so JSON-encoded metadata stays `null` instead of `{}`.
func copyStringMap(input map[string]string) map[string]string {
	if len(input) == 0 {
		return nil
	}
	cloned := make(map[string]string, len(input))
	for k, v := range input {
		cloned[k] = v
	}
	return cloned
}
|
||||
234
backend/services/active_scheduler/core/preview/dto.go
Normal file
234
backend/services/active_scheduler/core/preview/dto.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package preview
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
)
|
||||
|
||||
// CreatePreviewRequest is the request DTO for persisting a dry-run result as
// an active-schedule preview.
//
// Responsibility boundaries:
//  1. Carries the dry-run output plus optional override fields needed to write the preview row.
//  2. Does not carry confirm/apply requests, and callers may not pass formal schedule write parameters.
//  3. When GeneratedAt is zero the Service clock fills it in; ExpiresAt is always derived as generated_at + 1h.
type CreatePreviewRequest struct {
	ActiveContext       *schedulercontext.ActiveScheduleContext `json:"-"`
	Observation         observe.Result                          `json:"-"`
	Candidates          []candidate.Candidate                   `json:"-"`
	PreviewID           string                                  `json:"preview_id,omitempty"`
	TriggerID           string                                  `json:"trigger_id,omitempty"`
	SelectedCandidateID string                                  `json:"selected_candidate_id,omitempty"`
	BaseVersion         string                                  `json:"base_version,omitempty"`
	GeneratedAt         time.Time                               `json:"generated_at,omitempty"`
	ExplanationText     string                                  `json:"explanation_text,omitempty"`
	NotificationSummary string                                  `json:"notification_summary,omitempty"`
	FallbackUsed        bool                                    `json:"fallback_used,omitempty"`
}

// CreatePreviewResponse is the response DTO that can be returned to the API
// directly after the preview row has been written.
type CreatePreviewResponse struct {
	Detail ActiveSchedulePreviewDetail `json:"detail"`
}

// GetPreviewRequest is the request DTO for querying preview detail.
//
// Responsibility boundaries:
//  1. UserID comes from the auth context and must never be trusted from frontend input.
//  2. PreviewID comes from the route parameter.
//  3. The query only returns the preview snapshot; it neither writes back expired status nor triggers apply.
type GetPreviewRequest struct {
	UserID    int    `json:"user_id"`
	PreviewID string `json:"preview_id"`
}

// ActiveSchedulePreviewDetail is the detail-page response DTO for an
// active-schedule preview.
//
// Responsibility boundaries:
//  1. Restores the JSON snapshots stored in active_schedule_previews into a frontend-displayable structure.
//  2. Carries no formal schedule-write capability and does not mean a confirm request has passed validation.
//  3. CanConfirm only states that the current snapshot state allows initiating a confirm; whether it can
//     actually be applied is re-validated by the confirm/apply chain.
type ActiveSchedulePreviewDetail struct {
	PreviewID         string                     `json:"preview_id"`
	Status            string                     `json:"status"`
	ApplyStatus       string                     `json:"apply_status"`
	ExpiresAt         time.Time                  `json:"expires_at"`
	GeneratedAt       time.Time                  `json:"generated_at"`
	Expired           bool                       `json:"expired"`
	Trigger           PreviewTriggerDTO          `json:"trigger"`
	Explanation       string                     `json:"explanation"`
	Notification      string                     `json:"notification_summary"`
	SelectedCandidate CandidateDTO               `json:"selected_candidate"`
	Candidates        []CandidateDTO             `json:"candidates"`
	Decision          observe.Decision           `json:"decision"`
	Metrics           observe.Metrics            `json:"metrics"`
	Issues            []observe.Issue            `json:"issues"`
	ContextSummary    ContextSummaryDTO          `json:"context_summary"`
	Before            SchedulePreviewVersion     `json:"before"`
	After             SchedulePreviewVersion     `json:"after"`
	Changes           []ActiveScheduleChangeItem `json:"changes"`
	Risk              RiskDTO                    `json:"risk"`
	BaseVersion       string                     `json:"base_version"`
	CanConfirm        bool                       `json:"can_confirm"`
	CanIgnore         bool                       `json:"can_ignore"`
	TraceID           string                     `json:"trace_id"`
}
|
||||
|
||||
// PreviewTriggerDTO describes the trigger a preview originated from.
type PreviewTriggerDTO struct {
	TriggerID   string    `json:"trigger_id"`
	TriggerType string    `json:"trigger_type"`
	Source      string    `json:"source"`
	TargetType  string    `json:"target_type"`
	TargetID    int       `json:"target_id"`
	RequestedAt time.Time `json:"requested_at"`
}

// CandidateDTO is the display form of one scheduling candidate, including its
// proposed changes and validation result.
type CandidateDTO struct {
	CandidateID   string                     `json:"candidate_id"`
	CandidateType string                     `json:"candidate_type"`
	Title         string                     `json:"title"`
	Summary       string                     `json:"summary"`
	Target        CandidateTargetDTO         `json:"target"`
	Changes       []ActiveScheduleChangeItem `json:"changes"`
	BeforeSummary string                     `json:"before_summary"`
	AfterSummary  string                     `json:"after_summary"`
	Risk          string                     `json:"risk"`
	Score         int                        `json:"score"`
	Validation    candidate.Validation       `json:"validation"`
	Source        string                     `json:"source"`
}

// CandidateTargetDTO identifies the entity a candidate acts on.
type CandidateTargetDTO struct {
	TargetType string `json:"target_type"`
	TargetID   int    `json:"target_id"`
	Title      string `json:"title"`
}

// ContextSummaryDTO is the snapshot of the dry-run context the preview was
// built against (user, trigger, window, and trace breadcrumbs).
type ContextSummaryDTO struct {
	UserID        int       `json:"user_id"`
	Timezone      string    `json:"timezone"`
	TriggerSource string    `json:"trigger_source"`
	RequestedAt   time.Time `json:"requested_at"`
	WindowStart   time.Time `json:"window_start"`
	WindowEnd     time.Time `json:"window_end"`
	WindowReason  string    `json:"window_reason"`
	TargetType    string    `json:"target_type"`
	TargetID      int       `json:"target_id"`
	TargetTitle   string    `json:"target_title"`
	MissingInfo   []string  `json:"missing_info"`
	TraceSteps    []string  `json:"trace_steps"`
	Warnings      []string  `json:"warnings"`
}

// SchedulePreviewVersion is one rendered schedule state (before or after the
// proposed changes).
type SchedulePreviewVersion struct {
	Title        string                 `json:"title"`
	WindowStart  time.Time              `json:"window_start"`
	WindowEnd    time.Time              `json:"window_end"`
	Entries      []SchedulePreviewEntry `json:"entries"`
	SummaryLines []string               `json:"summary_lines"`
}

// SchedulePreviewEntry is one entry inside a SchedulePreviewVersion. Entries
// carry either absolute times or week/day/section coordinates.
type SchedulePreviewEntry struct {
	EntryID     string    `json:"entry_id"`
	SourceType  string    `json:"source_type"`
	SourceID    int       `json:"source_id"`
	Title       string    `json:"title"`
	StartAt     time.Time `json:"start_at,omitempty"`
	EndAt       time.Time `json:"end_at,omitempty"`
	Week        int       `json:"week,omitempty"`
	DayOfWeek   int       `json:"day_of_week,omitempty"`
	SectionFrom int       `json:"section_from,omitempty"`
	SectionTo   int       `json:"section_to,omitempty"`
	Status      string    `json:"status"`
	Editable    bool      `json:"editable"`
}

// ActiveScheduleChangeItem is one atomic change a candidate proposes. FromSlot
// and ToSlot are optional depending on the change type.
type ActiveScheduleChangeItem struct {
	ChangeID         string            `json:"change_id"`
	ChangeType       string            `json:"change_type"`
	TargetType       string            `json:"target_type"`
	TargetID         int               `json:"target_id"`
	FromSlot         *SlotDTO          `json:"from_slot,omitempty"`
	ToSlot           *SlotSpanDTO      `json:"to_slot,omitempty"`
	DurationSections int               `json:"duration_sections"`
	AffectedEventIDs []int             `json:"affected_event_ids"`
	EditedAllowed    bool              `json:"edited_allowed"`
	Metadata         map[string]string `json:"metadata,omitempty"`
}

// SlotDTO is a single week/day/section slot, optionally annotated with
// absolute times.
type SlotDTO struct {
	Week      int       `json:"week"`
	DayOfWeek int       `json:"day_of_week"`
	Section   int       `json:"section"`
	StartAt   time.Time `json:"start_at,omitempty"`
	EndAt     time.Time `json:"end_at,omitempty"`
}

// SlotSpanDTO is a contiguous range of slots from Start to End.
type SlotSpanDTO struct {
	Start            SlotDTO `json:"start"`
	End              SlotDTO `json:"end"`
	DurationSections int     `json:"duration_sections"`
}

// RiskDTO is the aggregated risk block shown on the preview detail page.
type RiskDTO struct {
	Level        string               `json:"level"`
	Summary      string               `json:"summary"`
	Validation   candidate.Validation `json:"validation"`
	RiskMetrics  observe.RiskMetrics  `json:"risk_metrics"`
	AffectedIDs  []int                `json:"affected_event_ids"`
	RequiresLLM  bool                 `json:"requires_llm"`
	FallbackUsed bool                 `json:"fallback_used"`
}

// rawPreviewSnapshot aggregates every snapshot that is written into the JSON
// columns of active_schedule_previews.
type rawPreviewSnapshot struct {
	selectedCandidate CandidateDTO
	candidates        []CandidateDTO
	decision          observe.Decision
	metrics           observe.Metrics
	issues            []observe.Issue
	contextSummary    ContextSummaryDTO
	before            SchedulePreviewVersion
	changes           []ActiveScheduleChangeItem
	after             SchedulePreviewVersion
	risk              RiskDTO
}
|
||||
|
||||
func slotDTO(slot ports.Slot) SlotDTO {
|
||||
return SlotDTO{
|
||||
Week: slot.Week,
|
||||
DayOfWeek: slot.DayOfWeek,
|
||||
Section: slot.Section,
|
||||
StartAt: slot.StartAt,
|
||||
EndAt: slot.EndAt,
|
||||
}
|
||||
}
|
||||
|
||||
func slotSpanDTO(span ports.SlotSpan) SlotSpanDTO {
|
||||
return SlotSpanDTO{
|
||||
Start: slotDTO(span.Start),
|
||||
End: slotDTO(span.End),
|
||||
DurationSections: span.DurationSections,
|
||||
}
|
||||
}
|
||||
|
||||
// decodeJSONField decodes one JSON snapshot column into T.
//
// Notes:
//  1. The current task only allows touching the preview package, so this JSON
//     helper deliberately stays package-private instead of moving to a shared layer;
//  2. If a second active_scheduler sub-package needs the same capability later,
//     it should be lifted per the AGENTS rules;
//  3. A nil or empty column yields the fallback with no error; a decode failure
//     returns the raw error so a corrupted snapshot is never silently shown.
func decodeJSONField[T any](raw *string, fallback T) (T, error) {
	if raw == nil {
		return fallback, nil
	}
	payload := *raw
	if payload == "" {
		return fallback, nil
	}
	var decoded T
	if unmarshalErr := json.Unmarshal([]byte(payload), &decoded); unmarshalErr != nil {
		return fallback, unmarshalErr
	}
	return decoded, nil
}
|
||||
307
backend/services/active_scheduler/core/preview/service.go
Normal file
307
backend/services/active_scheduler/core/preview/service.go
Normal file
@@ -0,0 +1,307 @@
|
||||
package preview
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Sentinel errors of the preview service; callers match them with errors.Is
// to distinguish bad requests from missing previews.
var (
	ErrInvalidPreviewRequest = errors.New("主动调度预览请求不合法")
	ErrPreviewNotFound       = errors.New("主动调度预览不存在")
)

// Repository is the minimal persistence port the preview service depends on.
//
// Responsibility boundaries:
//  1. Covers only the methods needed for preview writes and detail queries in this round.
//  2. Exposes no formal schedule write, notification delivery, or confirm/apply capability.
//  3. The existing dao.ActiveScheduleDAO already satisfies this interface; a dedicated
//     repository implementation can replace it later without touching the service.
type Repository interface {
	CreatePreview(ctx context.Context, preview *model.ActiveSchedulePreview) error
	GetPreviewByID(ctx context.Context, previewID string) (*model.ActiveSchedulePreview, error)
}

// Service handles writing and querying active-schedule previews.
//
// Responsibility boundaries:
//  1. Persists dry-run results as "ready" snapshots in active_schedule_previews.
//  2. Validates user_id on reads and returns detail DTOs the API layer can pass through.
//  3. Never writes formal schedules, sends notifications, handles confirm/apply, or
//     mutates trigger state.
type Service struct {
	repo Repository
	// clock defaults to time.Now and is replaceable via SetClock for tests.
	clock func() time.Time
}
|
||||
|
||||
func NewService(repo Repository) (*Service, error) {
|
||||
if repo == nil {
|
||||
return nil, fmt.Errorf("%w: preview repository 不能为空", ErrInvalidPreviewRequest)
|
||||
}
|
||||
return &Service{repo: repo, clock: time.Now}, nil
|
||||
}
|
||||
|
||||
// SetClock 注入测试时钟。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只影响 generated_at / expires_at 和查询时的 expired 计算;
|
||||
// 2. 不改写 dry-run 上下文中的业务当前时间;
|
||||
// 3. clock 为空时保持原时钟,避免运行期误注入导致 panic。
|
||||
func (s *Service) SetClock(clock func() time.Time) {
|
||||
if s == nil || clock == nil {
|
||||
return
|
||||
}
|
||||
s.clock = clock
|
||||
}
|
||||
|
||||
// CreatePreview saves a finished dry-run result as a "ready" preview.
//
// Responsibility boundaries:
//  1. Only consumes a completed dry-run result; it never re-reads task/schedule facts.
//  2. Prefers the upstream selection result's selected_candidate_id / explanation /
//     notification summary; when absent it falls back to the top-1 candidate for
//     compatibility with the old chain.
//  3. After the write it only returns the detail DTO — no notification is published,
//     no candidate is formally applied, and the trigger is not written back.
func (s *Service) CreatePreview(ctx context.Context, req CreatePreviewRequest) (*CreatePreviewResponse, error) {
	if s == nil || s.repo == nil {
		return nil, fmt.Errorf("%w: preview service 未初始化", ErrInvalidPreviewRequest)
	}
	if req.ActiveContext == nil {
		return nil, fmt.Errorf("%w: dry-run 结果不能为空", ErrInvalidPreviewRequest)
	}
	if len(req.Candidates) == 0 {
		return nil, fmt.Errorf("%w: dry-run 未生成可保存候选", ErrInvalidPreviewRequest)
	}

	activeContext := req.ActiveContext
	// Explicit request trigger_id wins; otherwise fall back to the context's.
	triggerID := strings.TrimSpace(req.TriggerID)
	if triggerID == "" {
		triggerID = strings.TrimSpace(activeContext.Trigger.TriggerID)
	}
	if triggerID == "" {
		return nil, fmt.Errorf("%w: trigger_id 不能为空", ErrInvalidPreviewRequest)
	}

	// A zero GeneratedAt is filled from the injectable service clock.
	generatedAt := req.GeneratedAt
	if generatedAt.IsZero() {
		generatedAt = s.now()
	}
	previewID := strings.TrimSpace(req.PreviewID)
	if previewID == "" {
		previewID = "asp_" + uuid.NewString()
	}

	// 1. Resolve the selected candidate first, then build the display snapshot;
	//    any JSON conversion failure returns early so no half-structured record lands.
	// 1.1 If upstream supplied selected_candidate_id, persist exactly that candidate
	//     so the preview cannot diverge from the selection result.
	// 1.2 Otherwise keep the backend candidate order's first entry for old-flow compatibility.
	// 1.3 If the given ID is not in the candidate list, fail instead of writing a
	//     mismatched preview.
	selected, err := pickSelectedCandidate(req.Candidates, req.SelectedCandidateID)
	if err != nil {
		return nil, err
	}
	snapshot := buildSnapshot(activeContext, req.Observation, req.Candidates, selected, req.FallbackUsed)
	baseVersion := strings.TrimSpace(req.BaseVersion)
	if baseVersion == "" {
		baseVersion = buildBaseVersion(activeContext, snapshot.changes)
	}

	// Explanation and notification summary fall back to the candidate summary.
	explanation := strings.TrimSpace(req.ExplanationText)
	if explanation == "" {
		explanation = selected.Summary
	}
	notificationSummary := strings.TrimSpace(req.NotificationSummary)
	if notificationSummary == "" {
		notificationSummary = selected.Summary
	}

	row, err := buildPreviewModel(previewID, triggerID, generatedAt, baseVersion, explanation, notificationSummary, activeContext, snapshot)
	if err != nil {
		return nil, err
	}

	// 2. Write active_schedule_previews. No cross-table transaction here, because
	//    this service does not advance trigger/notification/apply state.
	if err := s.repo.CreatePreview(ctx, row); err != nil {
		return nil, err
	}

	detail, err := detailFromModel(row, s.now())
	if err != nil {
		return nil, err
	}
	return &CreatePreviewResponse{Detail: detail}, nil
}
|
||||
|
||||
// GetPreview 查询 preview 详情,并强制校验归属用户。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. preview_id 不存在或不属于 user_id 时统一返回 ErrPreviewNotFound,避免泄漏其它用户数据;
|
||||
// 2. 查询不会把过期 preview 回写为 expired,过期状态仅在 DTO 中计算;
|
||||
// 3. 不读取正式日程实时状态,因此不会触发 confirm 的 base_version 重校验。
|
||||
func (s *Service) GetPreview(ctx context.Context, userID int, previewID string) (*ActiveSchedulePreviewDetail, error) {
|
||||
if s == nil || s.repo == nil {
|
||||
return nil, fmt.Errorf("%w: preview service 未初始化", ErrInvalidPreviewRequest)
|
||||
}
|
||||
if userID <= 0 || strings.TrimSpace(previewID) == "" {
|
||||
return nil, ErrPreviewNotFound
|
||||
}
|
||||
|
||||
row, err := s.repo.GetPreviewByID(ctx, strings.TrimSpace(previewID))
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, ErrPreviewNotFound
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if row == nil || row.UserID != userID {
|
||||
return nil, ErrPreviewNotFound
|
||||
}
|
||||
|
||||
detail, err := detailFromModel(row, s.now())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &detail, nil
|
||||
}
|
||||
|
||||
func (s *Service) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
func pickSelectedCandidate(candidates []candidate.Candidate, selectedCandidateID string) (candidate.Candidate, error) {
|
||||
if len(candidates) == 0 {
|
||||
return candidate.Candidate{}, fmt.Errorf("%w: dry-run 链路未生成可保存候选", ErrInvalidPreviewRequest)
|
||||
}
|
||||
|
||||
selectedCandidateID = strings.TrimSpace(selectedCandidateID)
|
||||
if selectedCandidateID == "" {
|
||||
return candidates[0], nil
|
||||
}
|
||||
|
||||
for _, item := range candidates {
|
||||
if strings.TrimSpace(item.CandidateID) == selectedCandidateID {
|
||||
return item, nil
|
||||
}
|
||||
}
|
||||
return candidate.Candidate{}, fmt.Errorf("%w: selected_candidate_id 不在候选列表中", ErrInvalidPreviewRequest)
|
||||
}
|
||||
|
||||
func buildPreviewModel(
|
||||
previewID string,
|
||||
triggerID string,
|
||||
generatedAt time.Time,
|
||||
baseVersion string,
|
||||
explanation string,
|
||||
notificationSummary string,
|
||||
activeContext *schedulercontext.ActiveScheduleContext,
|
||||
snapshot rawPreviewSnapshot,
|
||||
) (*model.ActiveSchedulePreview, error) {
|
||||
selectedJSON, err := jsonString(snapshot.selectedCandidate)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
candidatesJSON, err := jsonString(snapshot.candidates)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
decisionJSON, err := jsonString(snapshot.decision)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
metricsJSON, err := jsonString(snapshot.metrics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
issuesJSON, err := jsonString(snapshot.issues)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contextJSON, err := jsonString(snapshot.contextSummary)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
beforeJSON, err := jsonString(snapshot.before)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
changesJSON, err := jsonString(snapshot.changes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
afterJSON, err := jsonString(snapshot.after)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
riskJSON, err := jsonString(snapshot.risk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &model.ActiveSchedulePreview{
|
||||
ID: previewID,
|
||||
UserID: activeContext.User.UserID,
|
||||
TriggerID: triggerID,
|
||||
TriggerType: string(activeContext.Trigger.TriggerType),
|
||||
TargetType: string(activeContext.Trigger.TargetType),
|
||||
TargetID: activeContext.Trigger.TargetID,
|
||||
Status: model.ActiveSchedulePreviewStatusReady,
|
||||
SelectedCandidateID: snapshot.selectedCandidate.CandidateID,
|
||||
CandidateCount: len(snapshot.candidates),
|
||||
SelectedCandidateJSON: &selectedJSON,
|
||||
CandidatesJSON: &candidatesJSON,
|
||||
DecisionJSON: &decisionJSON,
|
||||
MetricsJSON: &metricsJSON,
|
||||
IssuesJSON: &issuesJSON,
|
||||
ContextSummaryJSON: &contextJSON,
|
||||
BeforeSummaryJSON: &beforeJSON,
|
||||
PreviewChangesJSON: &changesJSON,
|
||||
AfterSummaryJSON: &afterJSON,
|
||||
RiskJSON: &riskJSON,
|
||||
ExplanationText: explanation,
|
||||
NotificationSummary: notificationSummary,
|
||||
BaseVersion: baseVersion,
|
||||
ExpiresAt: generatedAt.Add(time.Hour),
|
||||
GeneratedAt: generatedAt,
|
||||
ApplyStatus: model.ActiveScheduleApplyStatusNone,
|
||||
TraceID: activeContext.Trace.TraceID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// buildSnapshot assembles every JSON snapshot that goes into the preview row:
// the selected candidate, the full candidate list, the observation's
// decision/metrics/issues, the context summary, before/after schedule
// versions, the change list, and the derived risk block.
func buildSnapshot(
	activeContext *schedulercontext.ActiveScheduleContext,
	observation observe.Result,
	candidates []candidate.Candidate,
	selected candidate.Candidate,
	fallbackUsed bool,
) rawPreviewSnapshot {
	selectedDTO := candidateDTO(selected)
	candidateDTOs := make([]CandidateDTO, 0, len(candidates))
	for _, item := range candidates {
		candidateDTOs = append(candidateDTOs, candidateDTO(item))
	}
	// Changes, before and after are derived from the selected candidate only,
	// not from the whole candidate list.
	changes := changeDTOs(selected.CandidateID, selected.Changes)
	before := buildBeforeSummary(activeContext, selected, changes)
	after := buildAfterSummary(before, selected, changes)

	return rawPreviewSnapshot{
		selectedCandidate: selectedDTO,
		candidates:        candidateDTOs,
		decision:          observation.Decision,
		metrics:           observation.Metrics,
		issues:            observation.Issues,
		contextSummary:    contextSummaryDTO(activeContext),
		before:            before,
		changes:           changes,
		after:             after,
		risk:              riskDTO(selected, observation, changes, fallbackUsed),
	}
}
|
||||
61
backend/services/active_scheduler/core/selection/dto.go
Normal file
61
backend/services/active_scheduler/core/selection/dto.go
Normal file
@@ -0,0 +1,61 @@
|
||||
package selection
|
||||
|
||||
import (
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
)
|
||||
|
||||
// Actions the selector may return; they mirror the allowed prompt actions.
const (
	ActionSelectCandidate = "select_candidate"
	ActionAskUser         = "ask_user"
	ActionNotifyOnly      = "notify_only"
	ActionClose           = "close"
)

// SelectRequest is the input of the active-schedule candidate selector.
//
// Responsibility boundaries:
//  1. Only carries the context, observation result and candidates already generated
//     and validated by dry-run.
//  2. Contains no model instance and is not responsible for prompt assembly.
//  3. Passed in by the graph runner after dry-run, so the selector never queries
//     the database directly.
type SelectRequest struct {
	ActiveContext *schedulercontext.ActiveScheduleContext `json:"-"`
	Observation   observe.Result                          `json:"-"`
	Candidates    []candidate.Candidate                   `json:"-"`
}

// Result is the selector's structured output.
//
// Responsibility boundaries:
//  1. Only records the finally selected candidate and the user-facing explanation summary.
//  2. Contains neither formal schedule write results nor notification delivery results.
//  3. FallbackUsed alone states whether the deterministic fallback was taken; it must
//     never be inferred from selected_candidate_id.
type Result struct {
	Action              string  `json:"action"`
	SelectedCandidateID string  `json:"selected_candidate_id,omitempty"`
	Reason              string  `json:"reason,omitempty"`
	ExplanationText     string  `json:"explanation_text,omitempty"`
	NotificationSummary string  `json:"notification_summary,omitempty"`
	AskUserQuestion     string  `json:"ask_user_question,omitempty"`
	FallbackUsed        bool    `json:"fallback_used,omitempty"`
	Confidence          float64 `json:"confidence,omitempty"`
}

// CandidateView is the minimal candidate view exposed to the LLM.
//
// Responsibility boundaries:
//  1. Keeps only the basic information and a few structured dimensions needed for
//     bounded selection.
//  2. Hides internal implementation details such as score / validation.
//  3. Carries no raw schedule facts, so the model never sees excess context.
type CandidateView struct {
	CandidateID   string `json:"candidate_id"`
	CandidateType string `json:"candidate_type"`
	Title         string `json:"title"`
	Summary       string `json:"summary"`
	BeforeSummary string `json:"before_summary"`
	AfterSummary  string `json:"after_summary"`
	ChangeSummary string `json:"change_summary"`
	CapacityFit   string `json:"capacity_fit"`
	RiskLevel     string `json:"risk_level"`
}
|
||||
232
backend/services/active_scheduler/core/selection/prompt.go
Normal file
232
backend/services/active_scheduler/core/selection/prompt.go
Normal file
@@ -0,0 +1,232 @@
|
||||
package selection
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
)
|
||||
|
||||
const selectionSystemPrompt = `
|
||||
你是 SmartFlow 主动调度的候选选择器。
|
||||
|
||||
你的职责很窄:
|
||||
1. 只在后端已经生成并校验过的候选里做有限选择;
|
||||
2. 只参考结构化事实、候选基础信息、capacity_fit 和 risk_level;
|
||||
3. 不要输出推理过程,不要输出 score,不要输出 confidence,不要编造新的候选;
|
||||
4. 只输出 JSON,不要输出 markdown,不要输出解释性正文。
|
||||
|
||||
允许的 action:
|
||||
- select_candidate
|
||||
- ask_user
|
||||
- notify_only
|
||||
- close
|
||||
|
||||
输出 JSON 结构:
|
||||
{
|
||||
"action": "select_candidate",
|
||||
"selected_candidate_id": "cand_xxx",
|
||||
"reason": "简短选择理由",
|
||||
"explanation_text": "给用户看的简短解释",
|
||||
"notification_summary": "通知里要显示的简短摘要",
|
||||
"ask_user_question": "需要追问时填写,否则留空"
|
||||
}
|
||||
|
||||
规则:
|
||||
1. selected_candidate_id 必须来自候选列表;如果 action=close,也要选候选列表里对应的 close 候选;
|
||||
2. 如果需要追问,优先选择 ask_user 候选,并把 ask_user_question 写清楚;
|
||||
3. 如果只需要提醒,不要编造正式日程变更;
|
||||
4. 如果候选里已经有明显更稳妥的项,优先选风险更低且 capacity_fit 更好的那个;
|
||||
5. 不要试图重排整段日程,第一版只在候选之间做有限裁决;
|
||||
6. 如果信息不足,就直接走 ask_user,不要硬猜。
|
||||
`
|
||||
|
||||
// selectionPromptInput is the full structured payload serialized into the
// selector's user prompt. All timestamps are pre-formatted strings so the
// model never sees raw time zones.
type selectionPromptInput struct {
	GeneratedAt  string                 `json:"generated_at"`
	Trigger      selectionTriggerInput  `json:"trigger"`
	DecisionHint selectionDecisionInput `json:"decision_hint"`
	Context      selectionContextInput  `json:"context"`
	Candidates   []CandidateView        `json:"candidates"`
}

// selectionTriggerInput describes the trigger that started this run.
type selectionTriggerInput struct {
	TriggerID   string `json:"trigger_id"`
	TriggerType string `json:"trigger_type"`
	Source      string `json:"source"`
	TargetType  string `json:"target_type"`
	TargetID    int    `json:"target_id"`
	TargetTitle string `json:"target_title"`
	RequestedAt string `json:"requested_at"`
	TraceID     string `json:"trace_id"`
}

// selectionDecisionInput is the deterministic decision hint produced by the
// observe stage, given to the model as guidance.
type selectionDecisionInput struct {
	Action              string `json:"action"`
	PrimaryIssueCode    string `json:"primary_issue_code"`
	ReasonCode          string `json:"reason_code"`
	ShouldNotify        bool   `json:"should_notify"`
	ShouldWritePreview  bool   `json:"should_write_preview"`
	FallbackCandidateID string `json:"fallback_candidate_id,omitempty"`
}

// selectionContextInput summarizes the scheduling window and trace breadcrumbs.
type selectionContextInput struct {
	WindowStart  string   `json:"window_start"`
	WindowEnd    string   `json:"window_end"`
	WindowReason string   `json:"window_reason"`
	MissingInfo  []string `json:"missing_info"`
	Warnings     []string `json:"warnings"`
	TraceSteps   []string `json:"trace_steps"`
}
|
||||
|
||||
// buildSelectionPromptInput converts a SelectRequest into the structured
// prompt payload. Trigger and window sections are filled only when the
// active context is present; the decision hint is always filled.
// NOTE(review): timestamps are rendered in time.Local — presumably the server
// runs in the users' timezone; confirm before deploying multi-region.
func buildSelectionPromptInput(req SelectRequest, now time.Time) selectionPromptInput {
	activeContext := req.ActiveContext
	decision := req.Observation.Decision
	input := selectionPromptInput{
		GeneratedAt: now.In(time.Local).Format(time.RFC3339),
		DecisionHint: selectionDecisionInput{
			Action:              string(decision.Action),
			PrimaryIssueCode:    string(decision.PrimaryIssueCode),
			ReasonCode:          decision.ReasonCode,
			ShouldNotify:        decision.ShouldNotify,
			ShouldWritePreview:  decision.ShouldWritePreview,
			FallbackCandidateID: strings.TrimSpace(decision.FallbackCandidateID),
		},
	}
	if activeContext != nil {
		input.Trigger = selectionTriggerInput{
			TriggerID:   activeContext.Trigger.TriggerID,
			TriggerType: string(activeContext.Trigger.TriggerType),
			Source:      string(activeContext.Trigger.Source),
			TargetType:  string(activeContext.Trigger.TargetType),
			TargetID:    activeContext.Trigger.TargetID,
			TargetTitle: activeContext.Target.Title,
			RequestedAt: activeContext.Trigger.RequestedAt.In(time.Local).Format(time.RFC3339),
			TraceID:     activeContext.Trace.TraceID,
		}
		// Slices are copied with append(nil, ...) so the prompt payload never
		// aliases the context's backing arrays.
		input.Context = selectionContextInput{
			WindowStart:  activeContext.Window.StartAt.In(time.Local).Format(time.RFC3339),
			WindowEnd:    activeContext.Window.EndAt.In(time.Local).Format(time.RFC3339),
			WindowReason: activeContext.Window.WindowReason,
			MissingInfo:  append([]string(nil), activeContext.DerivedFacts.MissingInfo...),
			Warnings:     append([]string(nil), activeContext.Trace.Warnings...),
			TraceSteps:   append([]string(nil), activeContext.Trace.BuildSteps...),
		}
	}

	input.Candidates = make([]CandidateView, 0, len(req.Candidates))
	for _, item := range req.Candidates {
		input.Candidates = append(input.Candidates, buildCandidateView(req, item))
	}
	return input
}
|
||||
|
||||
func buildSelectionUserPrompt(input selectionPromptInput) (string, error) {
|
||||
raw, err := json.MarshalIndent(input, "", " ")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var sb strings.Builder
|
||||
sb.WriteString("请基于下面的结构化事实,从候选中选一个最合适的结果。\n")
|
||||
sb.WriteString("输入:\n")
|
||||
sb.WriteString(string(raw))
|
||||
sb.WriteString("\n")
|
||||
sb.WriteString("只输出 JSON。")
|
||||
return sb.String(), nil
|
||||
}
|
||||
|
||||
// buildCandidateView projects one candidate into the read-only view shown to
// the LLM. Score and validation internals are intentionally omitted; capacity
// fit and risk level are derived so the model sees qualitative labels instead
// of raw metrics.
func buildCandidateView(req SelectRequest, item candidate.Candidate) CandidateView {
	return CandidateView{
		CandidateID:   item.CandidateID,
		CandidateType: string(item.CandidateType),
		Title:         item.Title,
		Summary:       item.Summary,
		BeforeSummary: item.BeforeSummary,
		AfterSummary:  item.AfterSummary,
		ChangeSummary: buildChangeSummary(item),
		CapacityFit:   deriveCapacityFit(req, item),
		RiskLevel:     deriveRiskLevel(req, item),
	}
}
|
||||
|
||||
func buildChangeSummary(item candidate.Candidate) string {
|
||||
if len(item.Changes) == 0 {
|
||||
return "无正式变更"
|
||||
}
|
||||
lines := make([]string, 0, len(item.Changes))
|
||||
for _, change := range item.Changes {
|
||||
lines = append(lines, summarizeChange(change))
|
||||
}
|
||||
return strings.Join(lines, ";")
|
||||
}
|
||||
|
||||
// summarizeChange renders one change item as a short human-readable line.
// Slot-bearing add/makeup changes include week/day/section detail; change
// types without slot data fall back to fixed descriptions.
func summarizeChange(change candidate.ChangeItem) string {
	switch change.ChangeType {
	case candidate.ChangeTypeAdd:
		if change.ToSlot != nil {
			return fmt.Sprintf("新增到 第%d周 第%d天 第%d-%d节,持续%d节",
				change.ToSlot.Start.Week,
				change.ToSlot.Start.DayOfWeek,
				change.ToSlot.Start.Section,
				change.ToSlot.End.Section,
				change.DurationSections,
			)
		}
		return "新增一段日程"
	case candidate.ChangeTypeCreateMakeup:
		if change.ToSlot != nil {
			return fmt.Sprintf("为目标补做一段第%d周第%d天第%d-%d节的时间块",
				change.ToSlot.Start.Week,
				change.ToSlot.Start.DayOfWeek,
				change.ToSlot.Start.Section,
				change.ToSlot.End.Section,
			)
		}
		return "新增补做块"
	case candidate.ChangeTypeAskUser:
		return "需要用户补充信息"
	default:
		// Unknown and no-op change types are summarized as "no official change".
		return "不修改正式日程"
	}
}
|
||||
|
||||
func deriveCapacityFit(req SelectRequest, item candidate.Candidate) string {
|
||||
switch item.CandidateType {
|
||||
case candidate.TypeAskUser, candidate.TypeNotifyOnly, candidate.TypeClose:
|
||||
return "not_applicable"
|
||||
}
|
||||
|
||||
if !item.Validation.Valid {
|
||||
return "insufficient"
|
||||
}
|
||||
|
||||
gap := req.Observation.Metrics.Window.CapacityGap
|
||||
switch {
|
||||
case gap > 0:
|
||||
return "insufficient"
|
||||
case gap == 0:
|
||||
return "tight"
|
||||
default:
|
||||
return "fit"
|
||||
}
|
||||
}
|
||||
|
||||
func deriveRiskLevel(req SelectRequest, item candidate.Candidate) string {
|
||||
if !item.Validation.Valid {
|
||||
return "high"
|
||||
}
|
||||
switch item.CandidateType {
|
||||
case candidate.TypeCreateMakeup:
|
||||
return "medium"
|
||||
case candidate.TypeAddTaskPoolToSchedule:
|
||||
if req.Observation.Metrics.Window.CapacityGap == 0 {
|
||||
return "medium"
|
||||
}
|
||||
return "low"
|
||||
case candidate.TypeAskUser, candidate.TypeNotifyOnly, candidate.TypeClose:
|
||||
return "low"
|
||||
default:
|
||||
return "low"
|
||||
}
|
||||
}
|
||||
303
backend/services/active_scheduler/core/selection/service.go
Normal file
303
backend/services/active_scheduler/core/selection/service.go
Normal file
@@ -0,0 +1,303 @@
|
||||
package selection
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"math"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
// selectionMaxTokens caps the model output size for one selection call.
const selectionMaxTokens = 1200
|
||||
|
||||
// Service performs the constrained LLM selection over active-scheduler
// candidates.
//
// Responsibility boundaries:
//  1. It only selects among backend-generated candidates and produces
//     explanation text; it never creates new candidates.
//  2. When the LLM fails, returns invalid output, or selects a non-existent
//     candidate, it falls back to the backend fallback candidate.
//  3. It writes no preview, sends no notification, and never mutates the
//     official schedule.
type Service struct {
	client *llmservice.Client // optional; nil means deterministic fallback only
	clock  func() time.Time   // injectable clock for tests; defaults to time.Now
	logger *log.Logger
}
|
||||
|
||||
// NewService creates the active-scheduler selection service.
//
// Notes:
//  1. client may be nil; a nil client makes the selector take only the
//     deterministic fallback path, which eases local testing and degradation.
//  2. Real model wiring happens in cmd/start.go:
//     aiHub.Pro -> llm.Client -> selection.Service.
//  3. The selector holds no model configuration itself; it only owns this
//     domain's prompt and result validation.
func NewService(client *llmservice.Client) *Service {
	return &Service{
		client: client,
		clock:  time.Now,
		logger: log.Default(),
	}
}
|
||||
|
||||
func (s *Service) SetClock(clock func() time.Time) {
|
||||
if s != nil && clock != nil {
|
||||
s.clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
// Select performs a bounded selection over active-scheduler candidates.
//
// Steps:
//  1. Validate the dry-run input first so the LLM never receives an empty
//     context or an empty candidate list.
//  2. If the model is unavailable, use the backend fallback candidate directly
//     and explicitly mark FallbackUsed.
//  3. If the model is available, build a read-only candidate view that hides
//     score / confidence / raw fact snapshots.
//  4. Verify the candidate_id returned by the LLM exists; fall back on any
//     invalid selection.
//  5. The result is only handed to the preview layer for persistence; no side
//     effects are produced here.
func (s *Service) Select(ctx context.Context, req SelectRequest) (Result, error) {
	if err := validateRequest(req); err != nil {
		return Result{}, err
	}

	if s == nil || s.client == nil {
		return buildFallbackResult(req, "模型客户端未配置"), nil
	}

	input := buildSelectionPromptInput(req, s.now())
	userPrompt, err := buildSelectionUserPrompt(input)
	if err != nil {
		return buildFallbackResult(req, "选择器 prompt 构造失败: "+err.Error()), nil
	}

	messages := llmservice.BuildSystemUserMessages(
		strings.TrimSpace(selectionSystemPrompt),
		nil,
		userPrompt,
	)
	resp, rawResult, err := llmservice.GenerateJSON[llmSelectionResponse](
		ctx,
		s.client,
		messages,
		llmservice.GenerateOptions{
			Temperature: 0.1,
			MaxTokens:   selectionMaxTokens,
			Thinking:    llmservice.ThinkingModeDisabled,
			Metadata: map[string]any{
				"stage":           "active_scheduler_select",
				"candidate_count": len(req.Candidates),
			},
		},
	)
	if err != nil {
		// Model failures degrade to the deterministic fallback rather than
		// surfacing an error to the caller; the raw output is logged truncated.
		if s.logger != nil {
			s.logger.Printf("[WARN] 主动调度 LLM 选择失败,使用 fallback: err=%v raw=%s", err, truncateRaw(rawResult))
		}
		return buildFallbackResult(req, "模型选择失败: "+err.Error()), nil
	}

	result, fallbackUsed := convertLLMResponse(req, resp)
	if fallbackUsed && s.logger != nil {
		selectedCandidateID := ""
		action := ""
		if resp != nil {
			selectedCandidateID = strings.TrimSpace(resp.SelectedCandidateID)
			action = strings.TrimSpace(resp.Action)
		}
		s.logger.Printf("[WARN] 主动调度 LLM 选择结果非法,使用 fallback: selected=%q action=%q",
			selectedCandidateID,
			action,
		)
	}
	return result, nil
}
|
||||
|
||||
// llmSelectionResponse is the JSON shape the selection LLM is expected to
// return; every field is optional and re-validated before use.
type llmSelectionResponse struct {
	Action              string `json:"action"`
	SelectedCandidateID string `json:"selected_candidate_id"`
	Reason              string `json:"reason"`
	ExplanationText     string `json:"explanation_text"`
	NotificationSummary string `json:"notification_summary"`
	AskUserQuestion     string `json:"ask_user_question"`
}
|
||||
|
||||
func validateRequest(req SelectRequest) error {
|
||||
if req.ActiveContext == nil {
|
||||
return errors.New("active scheduler selection 缺少上下文")
|
||||
}
|
||||
if len(req.Candidates) == 0 {
|
||||
return errors.New("active scheduler selection 缺少候选")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertLLMResponse turns the raw LLM payload into a Result; the second
// return value reports whether any part of the answer had to fall back.
//
// Full-fallback cases: nil response or an unknown candidate_id. When only the
// action is missing or incompatible with the selected candidate's type, the
// candidate is kept and just the action is corrected.
func convertLLMResponse(req SelectRequest, resp *llmSelectionResponse) (Result, bool) {
	if resp == nil {
		return buildFallbackResult(req, "模型返回空选择结果"), true
	}

	selected, ok := findCandidate(req.Candidates, resp.SelectedCandidateID)
	if !ok {
		return buildFallbackResult(req, "模型选择了不存在的候选"), true
	}

	inferredAction := inferAction(selected)
	action := normalizeAction(resp.Action)
	fallbackUsed := false
	if action == "" || !isActionCompatible(action, selected.CandidateType) {
		action = inferredAction
		fallbackUsed = true
	}

	// Explanation / notification / question fall through to progressively
	// weaker sources so the result is never blank.
	explanation := firstNonEmpty(resp.ExplanationText, selected.Summary)
	notificationSummary := firstNonEmpty(resp.NotificationSummary, explanation, selected.Summary)
	askUserQuestion := strings.TrimSpace(resp.AskUserQuestion)
	if action == ActionAskUser && askUserQuestion == "" {
		askUserQuestion = explanation
	}

	return Result{
		Action:              action,
		SelectedCandidateID: selected.CandidateID,
		Reason:              strings.TrimSpace(resp.Reason),
		ExplanationText:     explanation,
		NotificationSummary: notificationSummary,
		AskUserQuestion:     askUserQuestion,
		FallbackUsed:        fallbackUsed,
		Confidence:          deriveInternalConfidence(selected),
	}, fallbackUsed
}
|
||||
|
||||
// buildFallbackResult produces a deterministic Result without consulting the
// LLM, based on the backend-designated fallback candidate. reason records why
// the fallback was taken and is surfaced in Result.Reason.
func buildFallbackResult(req SelectRequest, reason string) Result {
	selected := pickFallbackCandidate(req)
	action := inferAction(selected)
	explanation := firstNonEmpty(selected.Summary, reason)
	return Result{
		Action:              action,
		SelectedCandidateID: selected.CandidateID,
		Reason:              strings.TrimSpace(reason),
		ExplanationText:     explanation,
		NotificationSummary: explanation,
		AskUserQuestion:     fallbackAskUserQuestion(action, explanation),
		FallbackUsed:        true,
		Confidence:          deriveInternalConfidence(selected),
	}
}
|
||||
|
||||
func pickFallbackCandidate(req SelectRequest) candidate.Candidate {
|
||||
fallbackID := strings.TrimSpace(req.Observation.Decision.FallbackCandidateID)
|
||||
if fallbackID != "" {
|
||||
if selected, ok := findCandidate(req.Candidates, fallbackID); ok {
|
||||
return selected
|
||||
}
|
||||
}
|
||||
return req.Candidates[0]
|
||||
}
|
||||
|
||||
func findCandidate(candidates []candidate.Candidate, id string) (candidate.Candidate, bool) {
|
||||
id = strings.TrimSpace(id)
|
||||
if id == "" {
|
||||
return candidate.Candidate{}, false
|
||||
}
|
||||
for _, item := range candidates {
|
||||
if strings.TrimSpace(item.CandidateID) == id {
|
||||
return item, true
|
||||
}
|
||||
}
|
||||
return candidate.Candidate{}, false
|
||||
}
|
||||
|
||||
func normalizeAction(raw string) string {
|
||||
switch strings.ToLower(strings.TrimSpace(raw)) {
|
||||
case ActionSelectCandidate:
|
||||
return ActionSelectCandidate
|
||||
case ActionAskUser:
|
||||
return ActionAskUser
|
||||
case ActionNotifyOnly:
|
||||
return ActionNotifyOnly
|
||||
case ActionClose:
|
||||
return ActionClose
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
func inferAction(item candidate.Candidate) string {
|
||||
switch item.CandidateType {
|
||||
case candidate.TypeAskUser:
|
||||
return ActionAskUser
|
||||
case candidate.TypeNotifyOnly:
|
||||
return ActionNotifyOnly
|
||||
case candidate.TypeClose:
|
||||
return ActionClose
|
||||
default:
|
||||
return ActionSelectCandidate
|
||||
}
|
||||
}
|
||||
|
||||
func isActionCompatible(action string, candidateType candidate.Type) bool {
|
||||
switch candidateType {
|
||||
case candidate.TypeAskUser:
|
||||
return action == ActionAskUser
|
||||
case candidate.TypeNotifyOnly:
|
||||
return action == ActionNotifyOnly
|
||||
case candidate.TypeClose:
|
||||
return action == ActionClose
|
||||
default:
|
||||
return action == ActionSelectCandidate
|
||||
}
|
||||
}
|
||||
|
||||
func fallbackAskUserQuestion(action string, explanation string) string {
|
||||
if action != ActionAskUser {
|
||||
return ""
|
||||
}
|
||||
return strings.TrimSpace(explanation)
|
||||
}
|
||||
|
||||
func deriveInternalConfidence(item candidate.Candidate) float64 {
|
||||
if !item.Validation.Valid {
|
||||
return 0.2
|
||||
}
|
||||
if item.Score <= 0 {
|
||||
return 0.55
|
||||
}
|
||||
score := float64(item.Score) / 100
|
||||
return math.Max(0.35, math.Min(0.95, score))
|
||||
}
|
||||
|
||||
// firstNonEmpty returns the first value that is non-blank after trimming
// whitespace, already trimmed; "" when every value is blank.
func firstNonEmpty(values ...string) string {
	for _, raw := range values {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			continue
		}
		return trimmed
	}
	return ""
}
|
||||
|
||||
func truncateRaw(raw *llmservice.TextResult) string {
|
||||
if raw == nil {
|
||||
return ""
|
||||
}
|
||||
text := strings.TrimSpace(raw.Text)
|
||||
runes := []rune(text)
|
||||
if len(runes) <= 200 {
|
||||
return text
|
||||
}
|
||||
return string(runes[:200]) + "..."
|
||||
}
|
||||
|
||||
func (s *Service) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
// String renders a compact one-line summary of the selection for logging.
func (r Result) String() string {
	return fmt.Sprintf("active_scheduler_selection(action=%s, selected=%s, fallback=%t)",
		r.Action,
		r.SelectedCandidateID,
		r.FallbackUsed,
	)
}
|
||||
92
backend/services/active_scheduler/core/service/dry_run.go
Normal file
92
backend/services/active_scheduler/core/service/dry_run.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
// DryRunResult is the synchronous result consumed directly by the API dry-run
// and worker test entry points.
type DryRunResult struct {
	Context     *schedulercontext.ActiveScheduleContext
	Observation observe.Result
	Candidates  []candidate.Candidate
}
|
||||
|
||||
// DryRunService orchestrates the active-scheduler dry-run main chain.
//
// Responsibility boundaries:
//  1. Always executes BuildContext -> Observe -> GenerateCandidates in order;
//  2. Calls no LLM, writes no preview, sends no notification, and never
//     writes the official schedule;
//  3. API / worker entry points should reuse this service so a second dry-run
//     diagnostic path never appears.
type DryRunService struct {
	builder   *schedulercontext.Builder
	analyzer  *observe.Analyzer
	generator *candidate.Generator
}
|
||||
|
||||
// NewDryRunService creates the active-scheduler dry-run service. It fails only
// when the context builder rejects the supplied readers.
func NewDryRunService(readers ports.Readers) (*DryRunService, error) {
	builder, err := schedulercontext.NewBuilder(readers)
	if err != nil {
		return nil, err
	}
	return &DryRunService{
		builder:   builder,
		analyzer:  observe.NewAnalyzer(),
		generator: candidate.NewGenerator(),
	}, nil
}
|
||||
|
||||
// SetClock injects a test clock into the underlying context builder; nil
// receivers or builders are ignored.
func (s *DryRunService) SetClock(clock func() time.Time) {
	if s != nil && s.builder != nil {
		s.builder.SetClock(clock)
	}
}
|
||||
|
||||
// DryRun runs the synchronous active-scheduler diagnosis for one trigger and
// returns the context, observation and candidates without any side effects.
func (s *DryRunService) DryRun(ctx context.Context, trig trigger.ActiveScheduleTrigger) (*DryRunResult, error) {
	if s == nil || s.builder == nil || s.analyzer == nil || s.generator == nil {
		return nil, errors.New("DryRunService 尚未正确初始化")
	}

	// 1. Build context: a read-only fact snapshot of task / schedule / feedback.
	activeContext, err := s.builder.BuildContext(ctx, trig)
	if err != nil {
		return nil, err
	}

	// 2. Observe: produce metrics, issues and a preliminary decision; no
	// official changes are generated.
	observation := s.analyzer.Observe(activeContext)

	// 3. Candidate generation: only the first-version deterministic candidates;
	// compression/merging stays disabled.
	candidates := s.generator.GenerateCandidates(activeContext, observation)
	fallbackCandidateID := ""
	if len(candidates) > 0 {
		fallbackCandidateID = candidates[0].CandidateID
	}
	// Finalize the decision with only the count of schedule-affecting
	// candidates and the fallback candidate's ID.
	observation = s.analyzer.FinalizeDecision(observation, len(applicableCandidates(candidates)), fallbackCandidateID)

	return &DryRunResult{
		Context:     activeContext,
		Observation: observation,
		Candidates:  candidates,
	}, nil
}
|
||||
|
||||
func applicableCandidates(candidates []candidate.Candidate) []candidate.Candidate {
|
||||
result := make([]candidate.Candidate, 0, len(candidates))
|
||||
for _, item := range candidates {
|
||||
if item.CandidateType == candidate.TypeAddTaskPoolToSchedule || item.CandidateType == candidate.TypeCreateMakeup {
|
||||
result = append(result, item)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -0,0 +1,31 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
)
|
||||
|
||||
// AsGraphDryRunFunc adapts the existing dry-run service into an entry point
// usable by the graph runner.
//
// Responsibility boundaries:
//  1. Only a lightweight service.Result -> graph.DryRunData conversion;
//  2. Does not alter dry-run behavior and adds no extra candidate logic;
//  3. Lets the graph runner reuse the existing
//     BuildContext -> Observe -> GenerateCandidates chain.
func (s *DryRunService) AsGraphDryRunFunc() activegraph.DryRunFunc {
	if s == nil {
		return nil
	}
	return func(ctx context.Context, trig trigger.ActiveScheduleTrigger) (*activegraph.DryRunData, error) {
		result, err := s.DryRun(ctx, trig)
		if err != nil {
			return nil, err
		}
		return &activegraph.DryRunData{
			Context:     result.Context,
			Observation: result.Observation,
			Candidates:  result.Candidates,
		}, nil
	}
}
|
||||
@@ -0,0 +1,366 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// PreviewConfirmService orchestrates phase-three preview creation, lookup and
// confirmed application.
//
// Responsibility boundaries:
//  1. Reuses dry-run results to write previews; it does not re-implement
//     candidate generation;
//  2. On confirm it only orchestrates preview state, idempotency and the
//     apply-port call;
//  3. The official schedule write is still performed transactionally by
//     applyadapter.
type PreviewConfirmService struct {
	dryRun       *DryRunService
	preview      *activepreview.Service
	activeDAO    *dao.ActiveScheduleDAO
	applyAdapter *applyadapter.GormApplyAdapter
	clock        func() time.Time // injectable clock for tests; defaults to time.Now
}
|
||||
|
||||
// NewPreviewConfirmService wires the preview/confirm orchestration. Every
// dependency is mandatory; a nil one yields an error rather than a service
// that panics later.
func NewPreviewConfirmService(dryRun *DryRunService, previewService *activepreview.Service, activeDAO *dao.ActiveScheduleDAO, applyAdapter *applyadapter.GormApplyAdapter) (*PreviewConfirmService, error) {
	if dryRun == nil {
		return nil, errors.New("dry-run service 不能为空")
	}
	if previewService == nil {
		return nil, errors.New("preview service 不能为空")
	}
	if activeDAO == nil {
		return nil, errors.New("active schedule dao 不能为空")
	}
	if applyAdapter == nil {
		return nil, errors.New("apply adapter 不能为空")
	}
	return &PreviewConfirmService{
		dryRun:       dryRun,
		preview:      previewService,
		activeDAO:    activeDAO,
		applyAdapter: applyAdapter,
		clock:        time.Now,
	}, nil
}
|
||||
|
||||
// SetClock injects a test clock; nil receivers and nil clocks are ignored.
func (s *PreviewConfirmService) SetClock(clock func() time.Time) {
	if s != nil && clock != nil {
		s.clock = clock
	}
}
|
||||
|
||||
// CreatePreviewFromDryRun delegates preview persistence to the preview
// service; it only guards against an uninitialized receiver.
func (s *PreviewConfirmService) CreatePreviewFromDryRun(ctx context.Context, req activepreview.CreatePreviewRequest) (*activepreview.CreatePreviewResponse, error) {
	if s == nil || s.preview == nil {
		return nil, errors.New("preview confirm service 未初始化")
	}
	return s.preview.CreatePreview(ctx, req)
}
|
||||
|
||||
// GetPreview delegates preview lookup (scoped to userID) to the preview
// service; it only guards against an uninitialized receiver.
func (s *PreviewConfirmService) GetPreview(ctx context.Context, userID int, previewID string) (*activepreview.ActiveSchedulePreviewDetail, error) {
	if s == nil || s.preview == nil {
		return nil, errors.New("preview confirm service 未初始化")
	}
	return s.preview.GetPreview(ctx, userID, previewID)
}
|
||||
|
||||
// ConfirmPreview synchronously confirms and applies an active-schedule preview.
//
// Steps:
//  1. Load the preview and enforce same-user ownership to prevent cross-user
//     confirmation;
//  2. For an already-applied preview hitting the same idempotency key, return
//     the historical result instead of writing the schedule again;
//  3. Convert candidate/edited_changes into an apply request;
//  4. Mark the preview "applying" before calling the official apply adapter;
//  5. Write the outcome (success or failure) back onto the preview so the
//     state is debuggable after the call returns.
func (s *PreviewConfirmService) ConfirmPreview(ctx context.Context, req activeapply.ConfirmRequest) (*activeapply.ConfirmResult, error) {
	if s == nil || s.activeDAO == nil || s.applyAdapter == nil {
		return nil, errors.New("preview confirm service 未初始化")
	}
	now := s.now()
	if req.RequestedAt.IsZero() {
		req.RequestedAt = now
	}
	previewRow, err := s.activeDAO.GetPreviewByID(ctx, req.PreviewID)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, activeapply.NewApplyError(activeapply.ErrorCodeTargetNotFound, "预览不存在或已被删除", err)
		}
		return nil, err
	}
	if previewRow.UserID != req.UserID {
		return nil, activeapply.NewApplyError(activeapply.ErrorCodeForbidden, "预览不属于当前用户", nil)
	}
	if previewRow.ApplyStatus == model.ActiveScheduleApplyStatusApplied {
		// Same idempotency key: replay the historical result. A different key
		// on an applied preview is rejected rather than re-applied.
		if previewRow.ApplyIdempotencyKey == req.IdempotencyKey {
			return alreadyAppliedResult(*previewRow), nil
		}
		return nil, activeapply.NewApplyError(activeapply.ErrorCodeAlreadyApplied, "预览已经应用,不能使用新的幂等键重复确认", nil)
	}

	applyReq, err := activeapply.ConvertConfirmToApplyRequest(*previewRow, req, now)
	if err != nil {
		// Best-effort failure stamp; the conversion error itself is returned.
		_ = s.markApplyFailed(ctx, previewRow.ID, "", err)
		return nil, err
	}
	if len(applyReq.Commands) == 0 {
		// Nothing touches the official schedule (notify_only / ask_user /
		// close): mark the preview applied without calling the adapter.
		return s.markNoopApplied(ctx, *applyReq)
	}
	if err = s.markApplying(ctx, *applyReq); err != nil {
		return nil, err
	}

	adapterReq := toAdapterRequest(*applyReq)
	adapterResult, err := s.applyAdapter.ApplyActiveScheduleChanges(ctx, adapterReq)
	if err != nil {
		classifiedErr := classifyAdapterApplyError(err)
		_ = s.markApplyFailed(ctx, previewRow.ID, applyReq.ApplyID, classifiedErr)
		return nil, classifiedErr
	}

	result := activeapply.ApplyActiveScheduleResult{
		ApplyID:              applyReq.ApplyID,
		ApplyStatus:          activeapply.ApplyStatusApplied,
		AppliedEventIDs:      adapterResult.AppliedEventIDs,
		AppliedScheduleIDs:   adapterResult.AppliedScheduleIDs,
		AppliedChanges:       applyReq.Changes,
		SkippedChanges:       applyReq.SkippedChanges,
		RequestHash:          applyReq.RequestHash,
		NormalizedChangeHash: applyReq.NormalizedChangesHash,
	}
	if err = s.markApplied(ctx, *applyReq, result); err != nil {
		return nil, err
	}
	return &activeapply.ConfirmResult{
		PreviewID:       applyReq.PreviewID,
		ApplyID:         applyReq.ApplyID,
		ApplyStatus:     activeapply.ApplyStatusApplied,
		CandidateID:     applyReq.CandidateID,
		RequestHash:     applyReq.RequestHash,
		RequestBodyHash: applyReq.RequestBodyHash,
		ApplyRequest:    applyReq,
		ApplyResult:     &result,
		SkippedChanges:  applyReq.SkippedChanges,
	}, nil
}
|
||||
|
||||
// markApplying stamps the preview row with the apply attempt's identity
// (apply_id, candidate, idempotency key, request hash) and moves its status to
// "applying" before the official write starts.
func (s *PreviewConfirmService) markApplying(ctx context.Context, req activeapply.ApplyActiveScheduleRequest) error {
	return s.activeDAO.UpdatePreviewFields(ctx, req.PreviewID, map[string]any{
		"apply_id":              req.ApplyID,
		"apply_status":          model.ActiveScheduleApplyStatusApplying,
		"apply_candidate_id":    req.CandidateID,
		"apply_idempotency_key": req.IdempotencyKey,
		"apply_request_hash":    req.RequestHash,
	})
}
|
||||
|
||||
// markNoopApplied handles notify_only / ask_user / close candidates — cases
// that "confirm successfully but write no official schedule".
//
// Responsibility boundaries:
//  1. Only marks the preview as handled and keeps the idempotency fields so a
//     retry with the same key hits the historical result directly;
//  2. Does not call the apply adapter, because these changes were already
//     classified as skipped_changes during conversion;
//  3. On failure the database error is returned as-is; callers should treat
//     it as a system error so the frontend never mistakes it for success.
func (s *PreviewConfirmService) markNoopApplied(ctx context.Context, req activeapply.ApplyActiveScheduleRequest) (*activeapply.ConfirmResult, error) {
	result := activeapply.ApplyActiveScheduleResult{
		ApplyID:              req.ApplyID,
		ApplyStatus:          activeapply.ApplyStatusApplied,
		AppliedChanges:       []activeapply.ApplyChange{},
		SkippedChanges:       req.SkippedChanges,
		RequestHash:          req.RequestHash,
		NormalizedChangeHash: req.NormalizedChangesHash,
	}
	if err := s.markApplied(ctx, req, result); err != nil {
		return nil, err
	}
	return &activeapply.ConfirmResult{
		PreviewID:       req.PreviewID,
		ApplyID:         req.ApplyID,
		ApplyStatus:     activeapply.ApplyStatusApplied,
		CandidateID:     req.CandidateID,
		RequestHash:     req.RequestHash,
		RequestBodyHash: req.RequestBodyHash,
		ApplyRequest:    &req,
		ApplyResult:     &result,
		SkippedChanges:  req.SkippedChanges,
	}, nil
}
|
||||
|
||||
// markApplied records a successful application on the preview row: status,
// idempotency fields, the applied changes/event IDs as JSON, a cleared
// apply_error, and the applied_at timestamp.
func (s *PreviewConfirmService) markApplied(ctx context.Context, req activeapply.ApplyActiveScheduleRequest, result activeapply.ApplyActiveScheduleResult) error {
	now := s.now()
	appliedChangesJSON := mustJSON(result.AppliedChanges)
	appliedEventIDsJSON := mustJSON(result.AppliedEventIDs)
	return s.activeDAO.UpdatePreviewFields(ctx, req.PreviewID, map[string]any{
		"status":                 model.ActiveSchedulePreviewStatusApplied,
		"apply_id":               req.ApplyID,
		"apply_status":           model.ActiveScheduleApplyStatusApplied,
		"apply_candidate_id":     req.CandidateID,
		"apply_idempotency_key":  req.IdempotencyKey,
		"apply_request_hash":     req.RequestHash,
		"applied_changes_json":   &appliedChangesJSON,
		"applied_event_ids_json": &appliedEventIDsJSON,
		"apply_error":            nil,
		"applied_at":             &now,
	})
}
|
||||
|
||||
// markApplyFailed records a failed apply attempt on the preview row. Classified
// apply errors pick the persisted status (expired -> expired, db_error ->
// failed, other codes -> rejected); unclassified errors stay failed. An empty
// previewID is a no-op, and applyID is only written when non-empty.
func (s *PreviewConfirmService) markApplyFailed(ctx context.Context, previewID string, applyID string, err error) error {
	if previewID == "" {
		return nil
	}
	message := ""
	if err != nil {
		message = err.Error()
	}
	status := model.ActiveScheduleApplyStatusFailed
	if applyErr, ok := activeapply.AsApplyError(err); ok {
		switch applyErr.Code {
		case activeapply.ErrorCodeExpired:
			status = model.ActiveScheduleApplyStatusExpired
		case activeapply.ErrorCodeDBError:
			status = model.ActiveScheduleApplyStatusFailed
		default:
			status = model.ActiveScheduleApplyStatusRejected
		}
	}
	updates := map[string]any{
		"apply_status": status,
		"apply_error":  &message,
	}
	if applyID != "" {
		updates["apply_id"] = applyID
	}
	return s.activeDAO.UpdatePreviewFields(ctx, previewID, updates)
}
|
||||
|
||||
// classifyAdapterApplyError converts errors from the official write adapter
// into the confirm layer's unified error codes.
//
// Responsibility boundaries:
//  1. Handles only the business error codes applyadapter declares, so the API
//     layer needs to understand only the active_scheduler/apply package;
//  2. Unknown errors map to db_error so real system faults are never
//     mislabeled as user-correctable 4xx errors;
//  3. The original error is kept as the cause, so logs and apply_error still
//     carry the adapter's full message.
func classifyAdapterApplyError(err error) error {
	if err == nil {
		return nil
	}
	var adapterErr *applyadapter.ApplyError
	if !errors.As(err, &adapterErr) {
		return activeapply.NewApplyError(activeapply.ErrorCodeDBError, "主动调度正式写库失败", err)
	}
	switch adapterErr.Code {
	case applyadapter.ErrorCodeInvalidRequest:
		return activeapply.NewApplyError(activeapply.ErrorCodeInvalidRequest, adapterErr.Message, err)
	case applyadapter.ErrorCodeUnsupportedChangeType:
		return activeapply.NewApplyError(activeapply.ErrorCodeUnsupportedChangeType, adapterErr.Message, err)
	case applyadapter.ErrorCodeTargetNotFound:
		return activeapply.NewApplyError(activeapply.ErrorCodeTargetNotFound, adapterErr.Message, err)
	case applyadapter.ErrorCodeTargetCompleted:
		return activeapply.NewApplyError(activeapply.ErrorCodeTargetCompleted, adapterErr.Message, err)
	case applyadapter.ErrorCodeTargetAlreadyScheduled:
		return activeapply.NewApplyError(activeapply.ErrorCodeTargetAlreadySchedule, adapterErr.Message, err)
	case applyadapter.ErrorCodeSlotConflict:
		return activeapply.NewApplyError(activeapply.ErrorCodeSlotConflict, adapterErr.Message, err)
	case applyadapter.ErrorCodeInvalidEditedChanges:
		return activeapply.NewApplyError(activeapply.ErrorCodeInvalidEditedChanges, adapterErr.Message, err)
	default:
		return activeapply.NewApplyError(activeapply.ErrorCodeDBError, adapterErr.Message, err)
	}
}
|
||||
|
||||
func (s *PreviewConfirmService) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
func toAdapterRequest(req activeapply.ApplyActiveScheduleRequest) applyadapter.ApplyActiveScheduleRequest {
|
||||
changes := make([]applyadapter.ApplyChange, 0, len(req.Changes))
|
||||
for _, change := range req.Changes {
|
||||
changes = append(changes, toAdapterChange(change))
|
||||
}
|
||||
return applyadapter.ApplyActiveScheduleRequest{
|
||||
PreviewID: req.PreviewID,
|
||||
ApplyID: req.ApplyID,
|
||||
UserID: req.UserID,
|
||||
CandidateID: req.CandidateID,
|
||||
Changes: changes,
|
||||
RequestedAt: req.RequestedAt,
|
||||
TraceID: req.TraceID,
|
||||
}
|
||||
}
|
||||
|
||||
// toAdapterChange maps one apply-layer change onto the adapter's change type.
// The slot list is collapsed into a span and the metadata map is copied so the
// adapter cannot mutate the caller's map.
func toAdapterChange(change activeapply.ApplyChange) applyadapter.ApplyChange {
	return applyadapter.ApplyChange{
		ChangeID:         change.ChangeID,
		ChangeType:       string(change.Type),
		TargetType:       change.TargetType,
		TargetID:         change.TargetID,
		ToSlot:           toAdapterSlotSpan(change),
		DurationSections: change.DurationSections,
		Metadata:         cloneStringMap(change.Metadata),
	}
}
|
||||
|
||||
// toAdapterSlotSpan collapses a change's slot list into a single span from the
// first to the last slot; no slots yields nil.
// NOTE(review): assumes Slots is ordered first-to-last and contiguous — the
// span's duration is just len(Slots). Confirm against the candidate
// generator's ordering guarantee.
func toAdapterSlotSpan(change activeapply.ApplyChange) *applyadapter.SlotSpan {
	if len(change.Slots) == 0 {
		return nil
	}
	start := change.Slots[0]
	end := change.Slots[len(change.Slots)-1]
	return &applyadapter.SlotSpan{
		Start:            applyadapter.Slot{Week: start.Week, DayOfWeek: start.DayOfWeek, Section: start.Section},
		End:              applyadapter.Slot{Week: end.Week, DayOfWeek: end.DayOfWeek, Section: end.Section},
		DurationSections: len(change.Slots),
	}
}
|
||||
|
||||
// alreadyAppliedResult reconstructs the historical confirm result for an
// idempotent re-confirmation of an already-applied preview.
// Unmarshal errors on the stored event-ID JSON are deliberately ignored: a
// corrupt record degrades to an empty ID list instead of failing the retry.
func alreadyAppliedResult(preview model.ActiveSchedulePreview) *activeapply.ConfirmResult {
	appliedEventIDs := []int{}
	if preview.AppliedEventIDsJSON != nil && *preview.AppliedEventIDsJSON != "" {
		_ = json.Unmarshal([]byte(*preview.AppliedEventIDsJSON), &appliedEventIDs)
	}
	return &activeapply.ConfirmResult{
		PreviewID:   preview.ID,
		ApplyID:     stringValue(preview.ApplyID),
		ApplyStatus: activeapply.ApplyStatusApplied,
		CandidateID: preview.ApplyCandidateID,
		RequestHash: preview.ApplyRequestHash,
		ApplyResult: &activeapply.ApplyActiveScheduleResult{
			ApplyID:         stringValue(preview.ApplyID),
			ApplyStatus:     activeapply.ApplyStatusApplied,
			AppliedEventIDs: appliedEventIDs,
			RequestHash:     preview.ApplyRequestHash,
		},
	}
}
|
||||
|
||||
// mustJSON serializes value, degrading to the JSON literal "null" on a
// marshal error so callers always receive valid JSON text.
func mustJSON(value any) string {
	if raw, err := json.Marshal(value); err == nil {
		return string(raw)
	}
	return "null"
}
|
||||
|
||||
// stringValue dereferences an optional string, treating nil as "".
func stringValue(value *string) string {
	if value != nil {
		return *value
	}
	return ""
}
|
||||
|
||||
// cloneStringMap returns an independent copy of input; nil and empty maps
// both normalize to nil.
func cloneStringMap(input map[string]string) map[string]string {
	if len(input) == 0 {
		return nil
	}
	clone := make(map[string]string, len(input))
	for key, value := range input {
		clone[key] = value
	}
	return clone
}
|
||||
302
backend/services/active_scheduler/core/service/session_bridge.go
Normal file
302
backend/services/active_scheduler/core/service/session_bridge.go
Normal file
@@ -0,0 +1,302 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// Deterministic UUID namespaces for the active-schedule session bridge: the
// same trigger ID always derives the same conversation/session ID, so trigger
// retries never fan out into duplicate chats or sessions.
var (
	activeScheduleConversationNamespace = uuid.NewSHA1(uuid.NameSpaceURL, []byte("smartflow:active_schedule:conversation"))
	activeScheduleSessionNamespace      = uuid.NewSHA1(uuid.NameSpaceURL, []byte("smartflow:active_schedule:session"))
)
|
||||
|
||||
// WithActiveScheduleSessionBridge 注入主动调度 session 预创建所需的 DAO。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只把 trigger -> notification 前的会话桥接能力接入 workflow;
|
||||
// 2. 不改变 dry-run / preview / notification 的主状态机;
|
||||
// 3. 为空时保留旧能力,便于局部测试与迁移期回退。
|
||||
func WithActiveScheduleSessionBridge(agentDAO *dao.AgentDAO, sessionDAO *dao.ActiveScheduleSessionDAO) TriggerWorkflowOption {
|
||||
return func(s *TriggerWorkflowService) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
s.agentDAO = agentDAO
|
||||
s.sessionDAO = sessionDAO
|
||||
}
|
||||
}
|
||||
|
||||
// bootstrapActiveScheduleConversationInTx pre-creates the conversation and its
// first-screen content before the notification is sent.
//
// Step-by-step:
//  1. Derive deterministic conversation/session IDs first, so trigger retries
//     never split into multiple conversations;
//  2. Create or reuse agent_chats and active_schedule_sessions in the same
//     transaction;
//  3. On first creation, seed an assistant_text entry and, when appropriate,
//     an active-schedule card;
//  4. Any failure returns an error so the enclosing transaction rolls back as
//     a whole — never "notification sent but conversation draft missing".
func (s *TriggerWorkflowService) bootstrapActiveScheduleConversationInTx(
	ctx context.Context,
	tx *gorm.DB,
	triggerRow model.ActiveScheduleTrigger,
	previewDetail activepreview.ActiveSchedulePreviewDetail,
	selectionResult selection.Result,
	now time.Time,
) error {
	if s == nil {
		return errors.New("主动调度会话桥未初始化")
	}
	// Bridge not configured: silently skip (legacy behaviour kept for
	// migration-period rollback, see WithActiveScheduleSessionBridge).
	if s.agentDAO == nil || s.sessionDAO == nil {
		return nil
	}
	if tx == nil {
		return errors.New("gorm tx 不能为空")
	}
	if triggerRow.ID == "" {
		return errors.New("trigger_id 不能为空")
	}

	// Deterministic IDs keyed on the trigger ID (UUIDv5 namespaces above).
	conversationID := buildActiveScheduleConversationID(triggerRow.ID)
	sessionID := buildActiveScheduleSessionID(triggerRow.ID)
	txAgentDAO := s.agentDAO.WithTx(tx)
	txSessionDAO := s.sessionDAO.WithTx(tx)

	if err := ensureAgentConversationExists(ctx, txAgentDAO, triggerRow.UserID, conversationID); err != nil {
		return err
	}

	baseSeq, err := txAgentDAO.GetConversationTimelineMaxSeq(ctx, triggerRow.UserID, conversationID)
	if err != nil {
		return err
	}

	// 1. Only seed the first-screen messages when the conversation is brand
	//    new, so retries of the same trigger do not duplicate the timeline.
	// 2. A non-zero max seq means this conversation was already warmed up —
	//    reuse the existing content as-is.
	if baseSeq == 0 {
		assistantText := resolveInitialActiveScheduleAssistantText(selectionResult, previewDetail)
		if assistantText != "" {
			if err := txAgentDAO.SaveChatHistoryInTx(ctx, triggerRow.UserID, conversationID, "assistant", assistantText, "", 0, 0, ""); err != nil {
				return err
			}
			if err := saveActiveScheduleTimelineEvent(ctx, txAgentDAO, triggerRow.UserID, conversationID, baseSeq+1, model.AgentTimelineKindAssistantText, "assistant", assistantText, nil); err != nil {
				return err
			}
			baseSeq++
		}

		if shouldSeedActiveSchedulePreviewCard(selectionResult) {
			cardPayload, err := buildActiveScheduleBusinessCardPayload(previewDetail)
			if err != nil {
				return err
			}
			// NOTE(review): the card event reuses assistantText as its content
			// (possibly empty when no text was seeded) — confirm intended.
			if err := saveActiveScheduleTimelineEvent(ctx, txAgentDAO, triggerRow.UserID, conversationID, baseSeq+1, model.AgentTimelineKindBusinessCard, "assistant", assistantText, cardPayload); err != nil {
				return err
			}
		}
	}

	// Upsert keeps the session snapshot idempotent across trigger retries.
	sessionSnapshot := &model.ActiveScheduleSessionSnapshot{
		SessionID:        sessionID,
		UserID:           triggerRow.UserID,
		ConversationID:   conversationID,
		TriggerID:        triggerRow.ID,
		CurrentPreviewID: strings.TrimSpace(previewDetail.PreviewID),
		Status:           resolveInitialActiveScheduleSessionStatus(selectionResult),
		State:            buildInitialActiveScheduleSessionState(selectionResult, previewDetail),
		CreatedAt:        now,
		UpdatedAt:        now,
	}
	return txSessionDAO.UpsertActiveScheduleSession(ctx, sessionSnapshot)
}
|
||||
|
||||
func buildActiveScheduleConversationID(triggerID string) string {
|
||||
normalized := strings.TrimSpace(triggerID)
|
||||
if normalized == "" {
|
||||
return uuid.NewString()
|
||||
}
|
||||
return uuid.NewSHA1(activeScheduleConversationNamespace, []byte(normalized)).String()
|
||||
}
|
||||
|
||||
func buildActiveScheduleSessionID(triggerID string) string {
|
||||
normalized := strings.TrimSpace(triggerID)
|
||||
if normalized == "" {
|
||||
return uuid.NewString()
|
||||
}
|
||||
return uuid.NewSHA1(activeScheduleSessionNamespace, []byte(normalized)).String()
|
||||
}
|
||||
|
||||
func ensureAgentConversationExists(ctx context.Context, agentDAO *dao.AgentDAO, userID int, conversationID string) error {
|
||||
if agentDAO == nil {
|
||||
return errors.New("agent dao 不能为空")
|
||||
}
|
||||
if userID <= 0 {
|
||||
return fmt.Errorf("invalid user_id: %d", userID)
|
||||
}
|
||||
normalizedConversationID := strings.TrimSpace(conversationID)
|
||||
if normalizedConversationID == "" {
|
||||
return errors.New("conversation_id 不能为空")
|
||||
}
|
||||
|
||||
exists, err := agentDAO.IfChatExists(ctx, userID, normalizedConversationID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if exists {
|
||||
return nil
|
||||
}
|
||||
_, err = agentDAO.CreateNewChat(userID, normalizedConversationID)
|
||||
return err
|
||||
}
|
||||
|
||||
func resolveInitialActiveScheduleAssistantText(selectionResult selection.Result, previewDetail activepreview.ActiveSchedulePreviewDetail) string {
|
||||
switch selectionResult.Action {
|
||||
case selection.ActionAskUser:
|
||||
return firstNonEmptyString(
|
||||
selectionResult.AskUserQuestion,
|
||||
selectionResult.ExplanationText,
|
||||
previewDetail.Explanation,
|
||||
previewDetail.Notification,
|
||||
"请先补充主动调度需要的关键信息。",
|
||||
)
|
||||
default:
|
||||
return firstNonEmptyString(
|
||||
selectionResult.ExplanationText,
|
||||
selectionResult.NotificationSummary,
|
||||
previewDetail.Notification,
|
||||
previewDetail.Explanation,
|
||||
"主动调度建议已更新。",
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// shouldSeedActiveSchedulePreviewCard reports whether bootstrap should append
// a preview business card to the timeline: only when the selector actually
// picked a candidate.
func shouldSeedActiveSchedulePreviewCard(selectionResult selection.Result) bool {
	return selectionResult.Action == selection.ActionSelectCandidate
}
|
||||
|
||||
func resolveInitialActiveScheduleSessionStatus(selectionResult selection.Result) string {
|
||||
switch selectionResult.Action {
|
||||
case selection.ActionAskUser:
|
||||
return model.ActiveScheduleSessionStatusWaitingUserReply
|
||||
case selection.ActionSelectCandidate:
|
||||
return model.ActiveScheduleSessionStatusReadyPreview
|
||||
default:
|
||||
return model.ActiveScheduleSessionStatusIgnored
|
||||
}
|
||||
}
|
||||
|
||||
// buildInitialActiveScheduleSessionState builds the initial session state
// snapshot from the selector outcome and preview detail.
//
// Behaviour by action:
//   - ask-user: keeps MissingInfo and records the pending question;
//   - select-candidate: clears the question/missing-info/failed-reason fields;
//   - anything else: clears question, missing info, and the expiry.
func buildInitialActiveScheduleSessionState(
	selectionResult selection.Result,
	previewDetail activepreview.ActiveSchedulePreviewDetail,
) model.ActiveScheduleSessionState {
	state := model.ActiveScheduleSessionState{
		LastCandidateID: strings.TrimSpace(selectionResult.SelectedCandidateID),
		// Clone so later mutation of the preview detail cannot leak into the
		// persisted state.
		MissingInfo: cloneStringSlice(previewDetail.ContextSummary.MissingInfo),
	}
	if !previewDetail.ExpiresAt.IsZero() {
		// Copy to a local so the pointer does not alias previewDetail.
		expiresAt := previewDetail.ExpiresAt
		state.ExpiresAt = &expiresAt
	}
	switch selectionResult.Action {
	case selection.ActionAskUser:
		state.PendingQuestion = firstNonEmptyString(
			selectionResult.AskUserQuestion,
			selectionResult.ExplanationText,
		)
	case selection.ActionSelectCandidate:
		state.PendingQuestion = ""
		state.MissingInfo = nil
		state.FailedReason = ""
	default:
		state.PendingQuestion = ""
		state.MissingInfo = nil
		state.ExpiresAt = nil
	}
	return state
}
|
||||
|
||||
// buildActiveScheduleBusinessCardPayload wraps the preview detail into the
// business-card payload stored on the timeline.
//
// The marshal/unmarshal round trip is deliberate: it normalizes the typed
// detail struct into a plain map[string]any so the payload can be persisted
// uniformly by saveActiveScheduleTimelineEvent.
func buildActiveScheduleBusinessCardPayload(detail activepreview.ActiveSchedulePreviewDetail) (map[string]any, error) {
	raw, err := json.Marshal(map[string]any{
		"business_card": map[string]any{
			"card_type": "active_schedule_preview",
			"title":     "SmartFlow 日程调整建议",
			"summary":   firstNonEmptyString(detail.Notification, detail.Explanation, detail.SelectedCandidate.Summary),
			"data":      detail,
		},
	})
	if err != nil {
		return nil, err
	}
	var payload map[string]any
	if err := json.Unmarshal(raw, &payload); err != nil {
		return nil, err
	}
	return payload, nil
}
|
||||
|
||||
// saveActiveScheduleTimelineEvent persists one timeline event for the
// active-schedule conversation.
//
// Parameters:
//   - seq: timeline position assigned by the caller (bootstrap keeps its own
//     baseSeq counter);
//   - kind/role/content: event classification and display content;
//   - payload: optional structured payload, serialized to JSON when non-empty.
//
// Returns an error when the DAO is nil, when the user/conversation key is
// invalid, when the payload fails to serialize, or when persistence fails.
func saveActiveScheduleTimelineEvent(
	ctx context.Context,
	agentDAO *dao.AgentDAO,
	userID int,
	conversationID string,
	seq int64,
	kind string,
	role string,
	content string,
	payload map[string]any,
) error {
	if agentDAO == nil {
		return errors.New("agent dao 不能为空")
	}
	normalizedConversationID := strings.TrimSpace(conversationID)
	if userID <= 0 || normalizedConversationID == "" {
		return errors.New("时间线事件主键不合法")
	}

	// Empty payload is stored as an empty string, not "{}".
	payloadJSON := ""
	if len(payload) > 0 {
		raw, err := json.Marshal(payload)
		if err != nil {
			return err
		}
		payloadJSON = string(raw)
	}

	// The event's token accounting is always zero here: bootstrap content is
	// system-generated, not model-billed.
	_, _, err := agentDAO.SaveConversationTimelineEvent(ctx, model.ChatTimelinePersistPayload{
		UserID:         userID,
		ConversationID: normalizedConversationID,
		Seq:            seq,
		Kind:           kind,
		Role:           role,
		Content:        content,
		PayloadJSON:    payloadJSON,
		TokensConsumed: 0,
	})
	return err
}
|
||||
|
||||
// firstNonEmptyString returns the first value that is non-empty after
// trimming whitespace, or "" when every candidate is blank.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		trimmed := strings.TrimSpace(candidate)
		if trimmed == "" {
			continue
		}
		return trimmed
	}
	return ""
}
|
||||
|
||||
// cloneStringSlice returns an independent copy of values; empty or nil input
// yields nil.
func cloneStringSlice(values []string) []string {
	if len(values) == 0 {
		return nil
	}
	return append(make([]string, 0, len(values)), values...)
}
|
||||
270
backend/services/active_scheduler/core/service/trigger.go
Normal file
270
backend/services/active_scheduler/core/service/trigger.go
Normal file
@@ -0,0 +1,270 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// triggerDedupeWindow is the aggregation window used by BuildTriggerDedupeKey
// for time-windowed trigger types (e.g. important_urgent_task).
const triggerDedupeWindow = 30 * time.Minute
|
||||
|
||||
// TriggerRequest is the request DTO for the formal active-schedule trigger
// entry point.
//
// Scope:
//  1. Carries normalized trigger facts from the API trigger, worker due jobs,
//     and user feedback;
//  2. Does not carry dry-run results, preview snapshots, or notification
//     provider parameters;
//  3. Payload only holds supplementary trigger-source information — it must
//     not be used to smuggle arbitrary business write parameters.
type TriggerRequest struct {
	UserID         int
	TriggerType    trigger.TriggerType
	Source         trigger.Source
	TargetType     trigger.TargetType
	TargetID       int
	FeedbackID     string
	IdempotencyKey string     // exact-replay key; checked before DedupeKey
	DedupeKey      string     // derived via BuildTriggerDedupeKey when empty
	MockNow        *time.Time // must be non-nil when IsMockTime is true (enforced in CreateAndPublish)
	IsMockTime     bool
	RequestedAt    time.Time // defaults to "now" when zero
	Payload        json.RawMessage
	JobID          *string
	TraceID        string // generated when empty
}
|
||||
|
||||
// TriggerResponse is the result of persisting a formal trigger.
// DedupeHit is true when an existing trigger was returned instead of
// creating a new one.
type TriggerResponse struct {
	TriggerID string  `json:"trigger_id"`
	Status    string  `json:"status"`
	PreviewID *string `json:"preview_id,omitempty"`
	DedupeHit bool    `json:"dedupe_hit"`
	TraceID   string  `json:"trace_id,omitempty"`
}
|
||||
|
||||
// TriggerService persists formal triggers and publishes the
// active_schedule.triggered event.
//
// Scope:
//  1. Only trigger-signal persistence, dedupe, and event publishing;
//  2. No dry-run, no preview writes, no Feishu calls;
//  3. When the outbox is disabled it returns an explicit error so callers do
//     not assume the formal pipeline is running.
type TriggerService struct {
	activeDAO *dao.ActiveScheduleDAO
	publisher outboxinfra.EventPublisher // may be nil; checked in CreateAndPublish
	clock     func() time.Time           // overridable via SetClock for tests
}
|
||||
|
||||
func NewTriggerService(activeDAO *dao.ActiveScheduleDAO, publisher outboxinfra.EventPublisher) (*TriggerService, error) {
|
||||
if activeDAO == nil {
|
||||
return nil, errors.New("active schedule dao 不能为空")
|
||||
}
|
||||
return &TriggerService{
|
||||
activeDAO: activeDAO,
|
||||
publisher: publisher,
|
||||
clock: time.Now,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *TriggerService) SetClock(clock func() time.Time) {
|
||||
if s != nil && clock != nil {
|
||||
s.clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
// CreateAndPublish creates the formal trigger and publishes the outbox event.
//
// Step-by-step:
//  1. Validate through the active-schedule trigger DTO first, making sure
//     mock_now cannot sneak in via the worker entry point;
//  2. Look up an existing trigger by idempotency_key / dedupe_key; on a hit,
//     return the old state directly;
//  3. Persist the new trigger, then publish to the outbox; a publish failure
//     marks the trigger failed for troubleshooting;
//  4. A nil error only means the event reached the outbox — not that a worker
//     has generated a preview yet.
func (s *TriggerService) CreateAndPublish(ctx context.Context, req TriggerRequest) (*TriggerResponse, error) {
	if s == nil || s.activeDAO == nil {
		return nil, errors.New("trigger service 未初始化")
	}
	if s.publisher == nil {
		return nil, errors.New("outbox event bus 未启用,无法执行正式主动调度 trigger")
	}

	now := s.now()
	if req.RequestedAt.IsZero() {
		req.RequestedAt = now
	}
	if req.IsMockTime && req.MockNow == nil {
		return nil, errors.New("is_mock_time=true 时 mock_now 不能为空")
	}
	// Build the domain DTO purely for validation and trace-id derivation.
	trig := trigger.ActiveScheduleTrigger{
		UserID:         req.UserID,
		TriggerType:    req.TriggerType,
		Source:         req.Source,
		TargetType:     req.TargetType,
		TargetID:       req.TargetID,
		FeedbackID:     req.FeedbackID,
		IdempotencyKey: req.IdempotencyKey,
		MockNow:        req.MockNow,
		IsMockTime:     req.IsMockTime,
		RequestedAt:    req.RequestedAt,
		TraceID:        firstNonEmpty(req.TraceID, fmt.Sprintf("trace_active_trigger_%d", now.UnixNano())),
	}
	if err := trig.Validate(); err != nil {
		return nil, err
	}
	if trig.Source == trigger.SourceAPIDryRun {
		return nil, errors.New("api_dry_run 不允许创建正式 trigger")
	}

	dedupeKey := strings.TrimSpace(req.DedupeKey)
	if dedupeKey == "" {
		dedupeKey = BuildTriggerDedupeKey(req.UserID, req.TriggerType, req.TargetType, req.TargetID, req.FeedbackID, req.IdempotencyKey, trig.EffectiveNow(req.RequestedAt))
	}
	// Dedupe hit: return the existing trigger's state without touching storage.
	if existing, ok, err := s.findExistingTrigger(ctx, req.UserID, string(req.TriggerType), req.IdempotencyKey, dedupeKey); err != nil {
		return nil, err
	} else if ok {
		return triggerResponseFromModel(existing, true), nil
	}

	// Normalize an empty payload to the literal "{}" so the column is always
	// valid JSON.
	payloadJSON := string(req.Payload)
	if strings.TrimSpace(payloadJSON) == "" {
		payloadJSON = "{}"
	}
	triggerID := "ast_" + uuid.NewString()
	row := &model.ActiveScheduleTrigger{
		ID:             triggerID,
		UserID:         req.UserID,
		TriggerType:    string(req.TriggerType),
		Source:         string(req.Source),
		TargetType:     string(req.TargetType),
		TargetID:       req.TargetID,
		FeedbackID:     strings.TrimSpace(req.FeedbackID),
		JobID:          req.JobID,
		IdempotencyKey: strings.TrimSpace(req.IdempotencyKey),
		DedupeKey:      dedupeKey,
		Status:         model.ActiveScheduleTriggerStatusPending,
		MockNow:        req.MockNow,
		IsMockTime:     req.IsMockTime,
		RequestedAt:    req.RequestedAt,
		PayloadJSON:    &payloadJSON,
		TraceID:        trig.TraceID,
	}
	if err := s.activeDAO.CreateTrigger(ctx, row); err != nil {
		return nil, err
	}

	eventPayload := sharedevents.ActiveScheduleTriggeredPayload{
		TriggerID:      row.ID,
		UserID:         row.UserID,
		TriggerType:    row.TriggerType,
		Source:         row.Source,
		TargetType:     row.TargetType,
		TargetID:       row.TargetID,
		FeedbackID:     row.FeedbackID,
		IdempotencyKey: row.IdempotencyKey,
		DedupeKey:      row.DedupeKey,
		MockNow:        row.MockNow,
		IsMockTime:     row.IsMockTime,
		RequestedAt:    row.RequestedAt,
		Payload:        json.RawMessage(payloadJSON),
		TraceID:        row.TraceID,
	}
	// Both failure paths below mark the trigger failed (best effort — the
	// mark error itself is intentionally ignored) and surface the root cause.
	if err := eventPayload.Validate(); err != nil {
		_ = s.markTriggerFailed(ctx, row.ID, "payload_invalid", err)
		return nil, err
	}
	if err := s.publisher.Publish(ctx, outboxinfra.PublishRequest{
		EventType:    sharedevents.ActiveScheduleTriggeredEventType,
		EventVersion: sharedevents.ActiveScheduleTriggeredEventVersion,
		MessageKey:   eventPayload.MessageKey(),
		AggregateID:  eventPayload.AggregateID(),
		Payload:      eventPayload,
	}); err != nil {
		_ = s.markTriggerFailed(ctx, row.ID, "outbox_publish_failed", err)
		return nil, err
	}

	return triggerResponseFromModel(row, false), nil
}
|
||||
|
||||
// findExistingTrigger looks for a dedupe hit: first by idempotency_key (an
// exact replay of the same request), then by dedupe_key restricted to
// in-flight statuses. Returns (row, true, nil) on a hit and (nil, false, nil)
// when no match exists; only unexpected lookup errors are propagated.
func (s *TriggerService) findExistingTrigger(ctx context.Context, userID int, triggerType string, idempotencyKey string, dedupeKey string) (*model.ActiveScheduleTrigger, bool, error) {
	if strings.TrimSpace(idempotencyKey) != "" {
		existing, err := s.activeDAO.FindTriggerByIdempotencyKey(ctx, userID, triggerType, idempotencyKey)
		if err == nil {
			return existing, true, nil
		}
		// "not found" just falls through to the dedupe-key lookup.
		if !errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, false, err
		}
	}
	// Dedupe only considers triggers still in flight; failed/completed ones
	// do not block a new attempt.
	statuses := []string{
		model.ActiveScheduleTriggerStatusPending,
		model.ActiveScheduleTriggerStatusProcessing,
		model.ActiveScheduleTriggerStatusPreviewGenerated,
	}
	existing, err := s.activeDAO.FindTriggerByDedupeKey(ctx, dedupeKey, statuses)
	if err == nil {
		return existing, true, nil
	}
	if !errors.Is(err, gorm.ErrRecordNotFound) {
		return nil, false, err
	}
	return nil, false, nil
}
|
||||
|
||||
func (s *TriggerService) markTriggerFailed(ctx context.Context, triggerID string, code string, err error) error {
|
||||
message := ""
|
||||
if err != nil {
|
||||
message = err.Error()
|
||||
}
|
||||
now := s.now()
|
||||
return s.activeDAO.UpdateTriggerFields(ctx, triggerID, map[string]any{
|
||||
"status": model.ActiveScheduleTriggerStatusFailed,
|
||||
"last_error_code": code,
|
||||
"last_error": &message,
|
||||
"completed_at": &now,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TriggerService) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
// BuildTriggerDedupeKey builds the dedupe key for formal triggers.
//
// Notes:
//  1. important_urgent_task (and other time-windowed types) aggregate on a
//     30-minute window so the same task does not keep generating previews;
//  2. unfinished_feedback prefers feedback_id / idempotency_key and applies no
//     fixed time-window dedupe;
//  3. Invalid arguments still yield a readable string; illegal input is
//     rejected by the caller at the trigger.Validate stage.
func BuildTriggerDedupeKey(userID int, triggerType trigger.TriggerType, targetType trigger.TargetType, targetID int, feedbackID string, idempotencyKey string, at time.Time) string {
	if triggerType == trigger.TriggerTypeUnfinishedFeedback {
		return fmt.Sprintf("%d:%s:%s", userID, triggerType, firstNonEmpty(feedbackID, idempotencyKey, fmt.Sprintf("%s:%d", targetType, targetID)))
	}
	if at.IsZero() {
		at = time.Now()
	}
	// Truncate aligns the timestamp to the dedupe window boundary, so all
	// triggers within one window share the same key.
	windowStart := at.Truncate(triggerDedupeWindow)
	return fmt.Sprintf("%d:%s:%s:%d:%s", userID, triggerType, targetType, targetID, windowStart.Format(time.RFC3339))
}
|
||||
|
||||
func triggerResponseFromModel(row *model.ActiveScheduleTrigger, dedupeHit bool) *TriggerResponse {
|
||||
if row == nil {
|
||||
return &TriggerResponse{DedupeHit: dedupeHit}
|
||||
}
|
||||
return &TriggerResponse{
|
||||
TriggerID: row.ID,
|
||||
Status: row.Status,
|
||||
PreviewID: row.PreviewID,
|
||||
DedupeHit: dedupeHit,
|
||||
TraceID: row.TraceID,
|
||||
}
|
||||
}
|
||||
219
backend/services/active_scheduler/core/service/trigger_outbox.go
Normal file
219
backend/services/active_scheduler/core/service/trigger_outbox.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
)
|
||||
|
||||
// EnqueueActiveScheduleTriggeredInTx writes the active_schedule.triggered
// outbox message inside the caller's transaction.
//
// Scope:
//  1. Only persists an already-validated event contract into the outbox;
//  2. Does not create the trigger record — the caller must persist the
//     trigger truth first;
//  3. Returns errors so the caller decides whether to roll back and retry
//     as a whole.
func EnqueueActiveScheduleTriggeredInTx(
	ctx context.Context,
	outboxRepo *outboxinfra.Repository,
	maxRetry int,
	payload sharedevents.ActiveScheduleTriggeredPayload,
) error {
	return enqueueContractEventInTx(
		ctx,
		outboxRepo,
		maxRetry,
		sharedevents.ActiveScheduleTriggeredEventType,
		sharedevents.ActiveScheduleTriggeredEventVersion,
		payload.MessageKey(),
		payload.AggregateID(),
		// NOTE(review): the aggregate ID doubles as the event ID here, so
		// re-enqueueing the same aggregate reuses one event_id — confirm this
		// is the intended dedupe semantics.
		payload.AggregateID(),
		payload,
		payload.Validate,
	)
}
|
||||
|
||||
// EnqueueNotificationFeishuRequestedInTx writes the
// notification.feishu.requested outbox message inside the caller's
// transaction.
//
// Scope:
//  1. Only serializes the event contract and enqueues it into the outbox;
//  2. Not responsible for notification_records idempotency or provider calls;
//  3. Returns errors directly so trigger -> preview -> notification stays in
//     one transactional rollback unit.
func EnqueueNotificationFeishuRequestedInTx(
	ctx context.Context,
	outboxRepo *outboxinfra.Repository,
	maxRetry int,
	payload sharedevents.FeishuNotificationRequestedPayload,
) error {
	// Register the event's service ownership first so the relay can route
	// this message to the notification service.
	if err := ensureNotificationFeishuOutboxRoute(); err != nil {
		return err
	}
	return enqueueContractEventInTx(
		ctx,
		outboxRepo,
		maxRetry,
		sharedevents.NotificationFeishuRequestedEventType,
		sharedevents.NotificationFeishuRequestedEventVersion,
		payload.MessageKey(),
		payload.AggregateID(),
		// NOTE(review): aggregate ID doubles as the event ID — confirm intended.
		payload.AggregateID(),
		payload,
		payload.Validate,
	)
}
|
||||
|
||||
// BuildTriggeredPayloadFromModel 把持久化 trigger 还原成事件载荷。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做 model -> contract DTO 映射;
|
||||
// 2. 不校验 trigger 是否应该被处理,业务真值判断由 scanner / worker 完成;
|
||||
// 3. 若 payload_json 不是合法 JSON,返回 error,让调用方回滚本次触发。
|
||||
// ensureNotificationFeishuOutboxRoute 确保 publisher 侧能把飞书通知事件写入 notification outbox。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 这里只登记 event_type -> notification 服务归属,不注册 handler,也不启动单体旧消费者;
|
||||
// 2. RegisterEventService 本身幂等,重复调用用于覆盖 API/worker 不同启动路径;
|
||||
// 3. 若路由登记失败,直接返回给事务调用方,让 trigger 与 notification 入队一起回滚。
|
||||
func ensureNotificationFeishuOutboxRoute() error {
|
||||
return outboxinfra.RegisterEventService(sharedevents.NotificationFeishuRequestedEventType, outboxinfra.ServiceNotification)
|
||||
}
|
||||
|
||||
func BuildTriggeredPayloadFromModel(row model.ActiveScheduleTrigger) (sharedevents.ActiveScheduleTriggeredPayload, error) {
|
||||
var rawPayload json.RawMessage
|
||||
if row.PayloadJSON != nil && strings.TrimSpace(*row.PayloadJSON) != "" {
|
||||
rawPayload = json.RawMessage(strings.TrimSpace(*row.PayloadJSON))
|
||||
if !json.Valid(rawPayload) {
|
||||
return sharedevents.ActiveScheduleTriggeredPayload{}, errors.New("trigger payload_json 不是合法 JSON")
|
||||
}
|
||||
}
|
||||
|
||||
payload := sharedevents.ActiveScheduleTriggeredPayload{
|
||||
TriggerID: row.ID,
|
||||
UserID: row.UserID,
|
||||
TriggerType: row.TriggerType,
|
||||
Source: row.Source,
|
||||
TargetType: row.TargetType,
|
||||
TargetID: row.TargetID,
|
||||
FeedbackID: row.FeedbackID,
|
||||
IdempotencyKey: row.IdempotencyKey,
|
||||
DedupeKey: row.DedupeKey,
|
||||
MockNow: row.MockNow,
|
||||
IsMockTime: row.IsMockTime,
|
||||
RequestedAt: row.RequestedAt,
|
||||
Payload: rawPayload,
|
||||
TraceID: row.TraceID,
|
||||
}
|
||||
if err := payload.Validate(); err != nil {
|
||||
return sharedevents.ActiveScheduleTriggeredPayload{}, err
|
||||
}
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// BuildFeishuRequestedPayload builds the notification event payload.
//
// Scope:
//  1. Only assembles the trigger/preview snapshot into the notification
//     contract;
//  2. It does not decide whether a notification should actually be sent —
//     callers should check decision.ShouldNotify first;
//  3. The fallback text is a last resort only; it does not replace the
//     provider-level policy in the notification handler.
func BuildFeishuRequestedPayload(
	triggerRow model.ActiveScheduleTrigger,
	previewID string,
	notificationSummary string,
	requestedAt time.Time,
) sharedevents.FeishuNotificationRequestedPayload {
	summary := strings.TrimSpace(notificationSummary)
	// Deep link into the deterministically derived conversation for this trigger.
	targetURL := fmt.Sprintf("/assistant/%s", buildActiveScheduleConversationID(triggerRow.ID))
	return sharedevents.FeishuNotificationRequestedPayload{
		UserID:       triggerRow.UserID,
		TriggerID:    triggerRow.ID,
		PreviewID:    strings.TrimSpace(previewID),
		TriggerType:  triggerRow.TriggerType,
		TargetType:   triggerRow.TargetType,
		TargetID:     triggerRow.TargetID,
		DedupeKey:    BuildNotificationDedupeKey(triggerRow.UserID, triggerRow.TriggerType, triggerRow.RequestedAt),
		TargetURL:    targetURL,
		SummaryText:  summary,
		FallbackText: buildNotificationFallbackText(summary, targetURL),
		TraceID:      triggerRow.TraceID,
		RequestedAt:  requestedAt,
	}
}
|
||||
|
||||
// BuildNotificationDedupeKey builds the 30-minute-window notification dedupe
// key.
//
// Notes:
//  1. First version aggregates on user_id + trigger_type + time window;
//  2. A zero requested_at falls back to the current time to avoid writing a
//     dirty dedupe_key from an empty value;
//  3. preview_id is deliberately excluded so repeated retries in the same
//     window collapse onto one notification record group.
func BuildNotificationDedupeKey(userID int, triggerType string, requestedAt time.Time) string {
	if requestedAt.IsZero() {
		requestedAt = time.Now()
	}
	return sharedevents.BuildFeishuNotificationDedupeKey(userID, triggerType, requestedAt, sharedevents.DefaultFeishuNotificationDedupeWindow)
}
|
||||
|
||||
// enqueueContractEventInTx validates, wraps, and persists one contract event
// into the outbox within the caller's transaction.
//
// Parameters:
//   - maxRetry: consumer retry budget; non-positive values default to 20;
//   - eventID/aggregateID/messageKey: identity fields, trimmed before storage;
//   - validate: the payload's own contract check; mandatory.
func enqueueContractEventInTx(
	ctx context.Context,
	outboxRepo *outboxinfra.Repository,
	maxRetry int,
	eventType string,
	eventVersion string,
	messageKey string,
	aggregateID string,
	eventID string,
	payload any,
	validate func() error,
) error {
	if outboxRepo == nil {
		return errors.New("outbox repository 不能为空")
	}
	if validate == nil {
		return errors.New("事件校验函数不能为空")
	}
	if err := validate(); err != nil {
		return err
	}

	payloadJSON, err := json.Marshal(payload)
	if err != nil {
		return err
	}
	// Default retry budget for outbox consumers.
	if maxRetry <= 0 {
		maxRetry = 20
	}

	wrapped := outboxinfra.OutboxEventPayload{
		EventID:      strings.TrimSpace(eventID),
		EventType:    eventType,
		EventVersion: strings.TrimSpace(eventVersion),
		AggregateID:  strings.TrimSpace(aggregateID),
		Payload:      payloadJSON,
	}
	// 1. This function only writes an already-validated event contract into
	//    the outbox; the concrete service/table/topic is resolved by the
	//    repository from eventType.
	// 2. That keeps the active scheduler free of explicit topic dependencies —
	//    switching to service-level routing later only requires maintaining
	//    the event ownership table.
	_, err = outboxRepo.CreateMessage(ctx, eventType, strings.TrimSpace(messageKey), wrapped, maxRetry)
	return err
}
|
||||
|
||||
// buildNotificationFallbackText assembles the plain-text fallback used when a
// rich notification cannot be rendered; the link is trimmed and an empty
// summary falls back to a generic prompt.
func buildNotificationFallbackText(summary string, targetURL string) string {
	link := strings.TrimSpace(targetURL)
	var b strings.Builder
	if summary == "" {
		b.WriteString("你有一条新的日程调整建议,请查看:")
	} else {
		b.WriteString(summary)
		b.WriteString(",请查看:")
	}
	b.WriteString(link)
	return b.String()
}
|
||||
|
||||
// firstNonEmpty returns the first value that is non-empty after trimming
// whitespace, or "" when all candidates are blank.
//
// Fix: TrimSpace was previously computed twice per candidate (once in the
// check, once in the return); trim once and reuse the result. This also makes
// the helper line up with its sibling firstNonEmptyString in session_bridge.go.
func firstNonEmpty(values ...string) string {
	for _, value := range values {
		if trimmed := strings.TrimSpace(value); trimmed != "" {
			return trimmed
		}
	}
	return ""
}
|
||||
@@ -0,0 +1,375 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
// Error codes written to the trigger row's last_error_code when the workflow
// rejects or fails a trigger.
const (
	triggerErrorCodePayloadMismatch = "payload_mismatch"
	triggerErrorCodeWorkerFailed    = "worker_failed"
)
|
||||
|
||||
// TriggerWorkflowService orchestrates the phase-4 trigger -> dry-run ->
// preview -> notification pipeline.
//
// Scope:
//  1. Only advances the background state machine of active-schedule triggers;
//     it does not start the outbox worker;
//  2. dry-run and selection reuse the standalone active_scheduler modules —
//     no active-scheduling logic leaks back into newAgent;
//  3. notification only publishes the requested event; it does not call the
//     real Feishu provider.
type TriggerWorkflowService struct {
	activeDAO   *dao.ActiveScheduleDAO // trigger/preview persistence
	graphRunner *activegraph.Runner    // dry-run + constrained-selection graph
	outbox      *outboxinfra.Repository
	kafkaCfg    kafkabus.Config
	agentDAO    *dao.AgentDAO                 // optional session bridge (WithActiveScheduleSessionBridge)
	sessionDAO  *dao.ActiveScheduleSessionDAO // optional session bridge
	clock       func() time.Time              // overridable via SetClock for tests
}
|
||||
|
||||
// NewTriggerWorkflowService builds the workflow service with no optional
// migration-period hooks; it delegates to NewTriggerWorkflowServiceWithOptions.
func NewTriggerWorkflowService(
	activeDAO *dao.ActiveScheduleDAO,
	graphRunner *activegraph.Runner,
	outboxRepo *outboxinfra.Repository,
	kafkaCfg kafkabus.Config,
) (*TriggerWorkflowService, error) {
	return NewTriggerWorkflowServiceWithOptions(activeDAO, graphRunner, outboxRepo, kafkaCfg)
}
|
||||
|
||||
// NewTriggerWorkflowServiceWithOptions 创建主动调度 trigger 编排服务,并允许注入迁移期可选能力。
|
||||
func NewTriggerWorkflowServiceWithOptions(
|
||||
activeDAO *dao.ActiveScheduleDAO,
|
||||
graphRunner *activegraph.Runner,
|
||||
outboxRepo *outboxinfra.Repository,
|
||||
kafkaCfg kafkabus.Config,
|
||||
opts ...TriggerWorkflowOption,
|
||||
) (*TriggerWorkflowService, error) {
|
||||
if activeDAO == nil {
|
||||
return nil, errors.New("active schedule dao 不能为空")
|
||||
}
|
||||
if graphRunner == nil {
|
||||
return nil, errors.New("active scheduler graph runner 不能为空")
|
||||
}
|
||||
if outboxRepo == nil {
|
||||
return nil, errors.New("outbox repository 不能为空")
|
||||
}
|
||||
svc := &TriggerWorkflowService{
|
||||
activeDAO: activeDAO,
|
||||
graphRunner: graphRunner,
|
||||
outbox: outboxRepo,
|
||||
kafkaCfg: kafkaCfg,
|
||||
clock: time.Now,
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if opt != nil {
|
||||
opt(svc)
|
||||
}
|
||||
}
|
||||
return svc, nil
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) SetClock(clock func() time.Time) {
|
||||
if s != nil && clock != nil {
|
||||
s.clock = clock
|
||||
}
|
||||
}
|
||||
|
||||
// TriggerWorkflowOption is an optional injection hook for the trigger
// orchestration service, applied by NewTriggerWorkflowServiceWithOptions.
type TriggerWorkflowOption func(*TriggerWorkflowService)
|
||||
|
||||
// ProcessTriggeredInTx advances the trigger main chain inside the outbox
// consumer transaction.
//
// Step by step:
//  1. Lock the trigger row first so that, under concurrent workers, only one
//     transaction can advance a given trigger;
//  2. Flip the status to processing so a consumed-but-unfinished message is
//     visible during troubleshooting;
//  3. Reuse the active-scheduler graph to run dry-run + constrained
//     selection; if a preview already exists, reuse it to avoid duplicate
//     writes;
//  4. After the preview succeeds, write back the trigger status and enqueue
//     the notification.requested outbox message in the same transaction;
//  5. Any failure returns an error; the outer handler records the failed
//     status and lets the outbox retry mechanism take over.
func (s *TriggerWorkflowService) ProcessTriggeredInTx(
	ctx context.Context,
	tx *gorm.DB,
	payload sharedevents.ActiveScheduleTriggeredPayload,
) error {
	if s == nil || s.activeDAO == nil || s.graphRunner == nil || s.outbox == nil {
		return errors.New("trigger workflow service 未初始化")
	}
	if tx == nil {
		return errors.New("gorm tx 不能为空")
	}
	if err := payload.Validate(); err != nil {
		return err
	}

	now := s.now()
	triggerRow, err := s.lockTrigger(ctx, tx, payload.TriggerID)
	if err != nil {
		return err
	}

	txDAO := s.activeDAO.WithTx(tx)
	// Triggers already in a terminal status are finished immediately without
	// re-processing (redelivered messages are a no-op).
	if completed, err := s.tryFinishByTerminalStatus(ctx, txDAO, *triggerRow); err != nil || completed {
		return err
	}
	// Reject the event when its payload no longer matches the stored row,
	// instead of processing inconsistent data.
	if handled, err := s.tryRejectMismatchedPayload(ctx, txDAO, *triggerRow, payload, now); err != nil || handled {
		return err
	}

	// Mark processing and clear stale error fields from earlier attempts.
	if err := txDAO.UpdateTriggerFields(ctx, triggerRow.ID, map[string]any{
		"status":          model.ActiveScheduleTriggerStatusProcessing,
		"processed_at":    &now,
		"last_error_code": nil,
		"last_error":      nil,
	}); err != nil {
		return err
	}

	// Idempotency: if a preview was already written for this trigger, reuse
	// it instead of generating a new one.
	existingPreview, err := txDAO.GetPreviewByTriggerID(ctx, triggerRow.ID)
	switch {
	case err == nil:
		return s.finishWithExistingPreview(ctx, txDAO, *triggerRow, *existingPreview, now)
	case errors.Is(err, gorm.ErrRecordNotFound):
		// No preview yet: continue and create a new one.
	default:
		return err
	}

	domainTrigger := buildDomainTriggerFromModel(*triggerRow, payload)
	graphResult, err := s.graphRunner.Run(ctx, domainTrigger)
	if err != nil {
		return err
	}
	if graphResult == nil || graphResult.DryRunData == nil {
		return errors.New("active scheduler graph 返回空结果")
	}
	dryRunData := graphResult.DryRunData
	// No candidates, or the observation decided neither to notify nor to
	// write a preview: close the trigger without producing a preview.
	if len(dryRunData.Candidates) == 0 {
		return s.markClosedWithoutPreview(ctx, txDAO, triggerRow.ID, now)
	}
	if !dryRunData.Observation.Decision.ShouldNotify && !dryRunData.Observation.Decision.ShouldWritePreview {
		return s.markClosedWithoutPreview(ctx, txDAO, triggerRow.ID, now)
	}

	previewService, err := activepreview.NewService(txDAO)
	if err != nil {
		return err
	}
	previewResp, err := previewService.CreatePreview(ctx, activepreview.CreatePreviewRequest{
		ActiveContext:       dryRunData.Context,
		Observation:         dryRunData.Observation,
		Candidates:          dryRunData.Candidates,
		TriggerID:           triggerRow.ID,
		GeneratedAt:         now,
		SelectedCandidateID: graphResult.SelectionResult.SelectedCandidateID,
		ExplanationText:     graphResult.SelectionResult.ExplanationText,
		NotificationSummary: graphResult.SelectionResult.NotificationSummary,
		FallbackUsed:        graphResult.SelectionResult.FallbackUsed,
	})
	if err != nil {
		return err
	}

	previewID := previewResp.Detail.PreviewID
	if err = txDAO.UpdateTriggerFields(ctx, triggerRow.ID, map[string]any{
		"status":          model.ActiveScheduleTriggerStatusPreviewGenerated,
		"preview_id":      &previewID,
		"completed_at":    &now,
		"last_error_code": nil,
		"last_error":      nil,
	}); err != nil {
		return err
	}

	if !dryRunData.Observation.Decision.ShouldNotify {
		return nil
	}

	// 1. Before the offline notification goes out, pre-build the assistant
	//    conversation and active-schedule session the user will land in after
	//    clicking.
	// 2. This commits in the same transaction as the preview / notification
	//    outbox, avoiding the broken state of "Feishu delivered but the
	//    conversation is blank".
	if err := s.bootstrapActiveScheduleConversationInTx(ctx, tx, *triggerRow, previewResp.Detail, graphResult.SelectionResult, now); err != nil {
		return err
	}

	notificationPayload := BuildFeishuRequestedPayload(
		*triggerRow,
		previewID,
		previewResp.Detail.Notification,
		now,
	)
	return EnqueueNotificationFeishuRequestedInTx(ctx, s.outbox.WithTx(tx), s.kafkaCfg.MaxRetry, notificationPayload)
}
|
||||
|
||||
// MarkTriggerFailedBestEffort 在事务外补记 trigger failed 状态,供 outbox retry 前排障。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做 best-effort 状态回写,不能影响外层对原始错误的返回;
|
||||
// 2. 不负责错误分类,当前统一记为 worker_failed;
|
||||
// 3. 失败时静默返回,让真正的重试仍由 outbox 状态机负责。
|
||||
func (s *TriggerWorkflowService) MarkTriggerFailedBestEffort(ctx context.Context, triggerID string, err error) {
|
||||
if s == nil || s.activeDAO == nil || strings.TrimSpace(triggerID) == "" {
|
||||
return
|
||||
}
|
||||
message := ""
|
||||
if err != nil {
|
||||
message = err.Error()
|
||||
}
|
||||
_ = s.activeDAO.UpdateTriggerFields(ctx, triggerID, map[string]any{
|
||||
"status": model.ActiveScheduleTriggerStatusFailed,
|
||||
"last_error_code": triggerErrorCodeWorkerFailed,
|
||||
"last_error": &message,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) lockTrigger(ctx context.Context, tx *gorm.DB, triggerID string) (*model.ActiveScheduleTrigger, error) {
|
||||
var row model.ActiveScheduleTrigger
|
||||
err := tx.WithContext(ctx).
|
||||
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||
Where("id = ?", triggerID).
|
||||
First(&row).Error
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &row, nil
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) tryFinishByTerminalStatus(
|
||||
ctx context.Context,
|
||||
txDAO *dao.ActiveScheduleDAO,
|
||||
row model.ActiveScheduleTrigger,
|
||||
) (bool, error) {
|
||||
switch row.Status {
|
||||
case model.ActiveScheduleTriggerStatusPreviewGenerated,
|
||||
model.ActiveScheduleTriggerStatusClosed,
|
||||
model.ActiveScheduleTriggerStatusSkipped,
|
||||
model.ActiveScheduleTriggerStatusRejected:
|
||||
return true, nil
|
||||
case model.ActiveScheduleTriggerStatusPending,
|
||||
model.ActiveScheduleTriggerStatusProcessing,
|
||||
model.ActiveScheduleTriggerStatusFailed:
|
||||
return false, nil
|
||||
default:
|
||||
// 1. 遇到未知状态时,不直接报错中断,而是继续按 processing 流程推进。
|
||||
// 2. 这样可以兼容迁移期历史脏数据,避免单条异常阻塞整批消费。
|
||||
// 3. 真实状态最终会被下面的 UpdateTriggerFields 覆盖为 processing。
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) tryRejectMismatchedPayload(
|
||||
ctx context.Context,
|
||||
txDAO *dao.ActiveScheduleDAO,
|
||||
row model.ActiveScheduleTrigger,
|
||||
payload sharedevents.ActiveScheduleTriggeredPayload,
|
||||
now time.Time,
|
||||
) (bool, error) {
|
||||
mismatchReason := buildPayloadMismatchReason(row, payload)
|
||||
if mismatchReason == "" {
|
||||
return false, nil
|
||||
}
|
||||
if err := txDAO.UpdateTriggerFields(ctx, row.ID, map[string]any{
|
||||
"status": model.ActiveScheduleTriggerStatusRejected,
|
||||
"last_error_code": triggerErrorCodePayloadMismatch,
|
||||
"last_error": &mismatchReason,
|
||||
"completed_at": &now,
|
||||
}); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) finishWithExistingPreview(
|
||||
ctx context.Context,
|
||||
txDAO *dao.ActiveScheduleDAO,
|
||||
triggerRow model.ActiveScheduleTrigger,
|
||||
previewRow model.ActiveSchedulePreview,
|
||||
now time.Time,
|
||||
) error {
|
||||
previewID := previewRow.ID
|
||||
return txDAO.UpdateTriggerFields(ctx, triggerRow.ID, map[string]any{
|
||||
"status": model.ActiveScheduleTriggerStatusPreviewGenerated,
|
||||
"preview_id": &previewID,
|
||||
"completed_at": &now,
|
||||
"last_error_code": nil,
|
||||
"last_error": nil,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) markClosedWithoutPreview(
|
||||
ctx context.Context,
|
||||
txDAO *dao.ActiveScheduleDAO,
|
||||
triggerID string,
|
||||
now time.Time,
|
||||
) error {
|
||||
return txDAO.UpdateTriggerFields(ctx, triggerID, map[string]any{
|
||||
"status": model.ActiveScheduleTriggerStatusClosed,
|
||||
"completed_at": &now,
|
||||
"last_error_code": nil,
|
||||
"last_error": nil,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *TriggerWorkflowService) now() time.Time {
|
||||
if s == nil || s.clock == nil {
|
||||
return time.Now()
|
||||
}
|
||||
return s.clock()
|
||||
}
|
||||
|
||||
func buildDomainTriggerFromModel(
|
||||
row model.ActiveScheduleTrigger,
|
||||
payload sharedevents.ActiveScheduleTriggeredPayload,
|
||||
) trigger.ActiveScheduleTrigger {
|
||||
mockNow := row.MockNow
|
||||
if mockNow == nil && payload.MockNow != nil {
|
||||
mockNow = payload.MockNow
|
||||
}
|
||||
traceID := strings.TrimSpace(row.TraceID)
|
||||
if traceID == "" {
|
||||
traceID = strings.TrimSpace(payload.TraceID)
|
||||
}
|
||||
if traceID == "" {
|
||||
traceID = "trace_active_trigger_" + uuid.NewString()
|
||||
}
|
||||
return trigger.ActiveScheduleTrigger{
|
||||
TriggerID: row.ID,
|
||||
UserID: row.UserID,
|
||||
TriggerType: trigger.TriggerType(row.TriggerType),
|
||||
Source: trigger.Source(row.Source),
|
||||
TargetType: trigger.TargetType(row.TargetType),
|
||||
TargetID: row.TargetID,
|
||||
FeedbackID: row.FeedbackID,
|
||||
IdempotencyKey: row.IdempotencyKey,
|
||||
MockNow: mockNow,
|
||||
IsMockTime: row.IsMockTime || payload.IsMockTime,
|
||||
RequestedAt: row.RequestedAt,
|
||||
TraceID: traceID,
|
||||
}
|
||||
}
|
||||
|
||||
func buildPayloadMismatchReason(row model.ActiveScheduleTrigger, payload sharedevents.ActiveScheduleTriggeredPayload) string {
|
||||
switch {
|
||||
case row.UserID != payload.UserID:
|
||||
return fmt.Sprintf("trigger 事件 user_id 不一致: row=%d payload=%d", row.UserID, payload.UserID)
|
||||
case row.TriggerType != payload.TriggerType:
|
||||
return fmt.Sprintf("trigger 事件 trigger_type 不一致: row=%s payload=%s", row.TriggerType, payload.TriggerType)
|
||||
case row.TargetType != payload.TargetType:
|
||||
return fmt.Sprintf("trigger 事件 target_type 不一致: row=%s payload=%s", row.TargetType, payload.TargetType)
|
||||
case row.TargetID != payload.TargetID:
|
||||
return fmt.Sprintf("trigger 事件 target_id 不一致: row=%d payload=%d", row.TargetID, payload.TargetID)
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
103
backend/services/active_scheduler/core/trigger/types.go
Normal file
103
backend/services/active_scheduler/core/trigger/types.go
Normal file
@@ -0,0 +1,103 @@
|
||||
package trigger
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TriggerType classifies the trigger signals allowed into the first version
// of the active-scheduler dry-run main chain.
//
// It only expresses signal classification; it does not decide whether a task
// actually needs scheduling, nor does it carry preview / notification /
// apply state.
type TriggerType string

const (
	TriggerTypeImportantUrgentTask TriggerType = "important_urgent_task"
	TriggerTypeUnfinishedFeedback  TriggerType = "unfinished_feedback"
)

// Source identifies where a trigger signal came from; the first dry-run
// version consumes it only for auditing and mock_now validation.
type Source string

const (
	SourceWorkerDueJob Source = "worker_due_job"
	SourceAPITrigger   Source = "api_trigger"
	SourceAPIDryRun    Source = "api_dry_run"
	SourceUserFeedback Source = "user_feedback"
)

// TargetType names the kind of business object a trigger points at.
type TargetType string

const (
	TargetTypeTaskPool      TargetType = "task_pool"
	TargetTypeScheduleEvent TargetType = "schedule_event"
	TargetTypeTaskItem      TargetType = "task_item"
)

// ActiveScheduleTrigger is the unified input of the active-scheduler main
// chain.
//
// Responsibilities:
//  1. Carries the normalized input from API dry-run, real triggers, workers
//     and user feedback;
//  2. Does not read task, schedule or feedback facts itself;
//  3. TargetID may be 0 for unfinished_feedback when the feedback target is
//     unknown; the observation chain turns that into ask_user.
type ActiveScheduleTrigger struct {
	TriggerID      string
	UserID         int
	TriggerType    TriggerType
	Source         Source
	TargetType     TargetType
	TargetID       int
	FeedbackID     string
	IdempotencyKey string
	MockNow        *time.Time
	IsMockTime     bool
	RequestedAt    time.Time
	TraceID        string
}

// Validate checks whether the trigger may enter the dry-run main chain.
//
// It performs only enum, ownership and mock_now entry-level validation; it
// does not check that the target exists, nor whether candidates should be
// generated. nil means the caller may continue building context; an error
// means the request should be rejected outright.
func (t ActiveScheduleTrigger) Validate() error {
	if t.UserID <= 0 {
		return errors.New("user_id 必须大于 0")
	}
	switch t.TriggerType {
	case TriggerTypeImportantUrgentTask, TriggerTypeUnfinishedFeedback:
	default:
		return errors.New("trigger_type 不受支持")
	}
	switch t.Source {
	case SourceWorkerDueJob, SourceAPITrigger, SourceAPIDryRun, SourceUserFeedback:
	default:
		return errors.New("source 不受支持")
	}
	switch t.TargetType {
	case TargetTypeTaskPool, TargetTypeScheduleEvent, TargetTypeTaskItem:
	default:
		return errors.New("target_type 不受支持")
	}
	// Only unfinished_feedback may omit a concrete target.
	if t.TargetID <= 0 && t.TriggerType != TriggerTypeUnfinishedFeedback {
		return errors.New("target_id 必须大于 0")
	}
	if t.MockNow != nil {
		if t.Source != SourceAPIDryRun && t.Source != SourceAPITrigger {
			return errors.New("mock_now 只允许 API dry-run 或 API trigger 使用")
		}
		if !t.IsMockTime {
			return errors.New("传入 mock_now 时必须显式标记 is_mock_time")
		}
	}
	return nil
}

// EffectiveNow returns the business "current time" this run should use:
// MockNow when present (dry-run / test triggers), otherwise the caller's
// real now. It performs no timezone conversion.
func (t ActiveScheduleTrigger) EffectiveNow(realNow time.Time) time.Time {
	if t.MockNow == nil {
		return realNow
	}
	return *t.MockNow
}
|
||||
131
backend/services/active_scheduler/dao/connect.go
Normal file
131
backend/services/active_scheduler/dao/connect.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package dao
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
coremodel "github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/spf13/viper"
|
||||
"gorm.io/driver/mysql"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// OpenDBFromConfig 创建 active-scheduler 服务自己的数据库句柄。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只迁移 active-scheduler 拥有的 trigger / preview / job / session 表和本服务 outbox 表;
|
||||
// 2. 不迁移 task、schedule、agent、notification 或 user/auth 表,避免独立进程越权管理其它服务模型;
|
||||
// 3. 返回的 *gorm.DB 供服务内主链路、due job scanner 和 outbox consumer 复用。
|
||||
func OpenDBFromConfig() (*gorm.DB, error) {
|
||||
host := viper.GetString("database.host")
|
||||
port := viper.GetString("database.port")
|
||||
user := viper.GetString("database.user")
|
||||
password := viper.GetString("database.password")
|
||||
dbname := viper.GetString("database.dbname")
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
user, password, host, port, dbname,
|
||||
)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = db.AutoMigrate(
|
||||
&coremodel.ActiveScheduleJob{},
|
||||
&coremodel.ActiveScheduleTrigger{},
|
||||
&coremodel.ActiveSchedulePreview{},
|
||||
&coremodel.ActiveScheduleSession{},
|
||||
); err != nil {
|
||||
return nil, fmt.Errorf("auto migrate active-scheduler tables failed: %w", err)
|
||||
}
|
||||
if err = autoMigrateActiveSchedulerOutboxTable(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = ensureRuntimeDependencyTables(db); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
// autoMigrateActiveSchedulerOutboxTable 只迁移 active-scheduler 服务自己的 outbox 物理表。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责 active-scheduler.outbox 对应表,不碰其它服务 outbox;
|
||||
// 2. 让独立 active-scheduler 服务可以单独发布 trigger 并消费 active_schedule.triggered;
|
||||
// 3. 若后续调整 outbox 表名,只改 service catalog,不在这里硬编码。
|
||||
func autoMigrateActiveSchedulerOutboxTable(db *gorm.DB) error {
|
||||
cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceActiveScheduler)
|
||||
if !ok {
|
||||
return fmt.Errorf("resolve active-scheduler outbox config failed")
|
||||
}
|
||||
if err := db.Table(cfg.TableName).AutoMigrate(&coremodel.AgentOutboxMessage{}); err != nil {
|
||||
return fmt.Errorf("auto migrate active-scheduler outbox table failed for %s (%s): %w", cfg.Name, cfg.TableName, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// runtimeDependencyTable names one externally-owned table that
// active-scheduler still depends on during the migration period, together
// with the reason it is needed (surfaced in startup failure messages).
type runtimeDependencyTable struct {
	Name   string
	Reason string
}
|
||||
|
||||
// ensureRuntimeDependencyTables 在服务启动期校验迁移期共享主库依赖。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只检查表是否存在,不 AutoMigrate、不补列、不修改任何跨域表;
|
||||
// 2. 把 active-scheduler 运行时仍然需要的 task / schedule / agent / notification outbox 边界显式化;
|
||||
// 3. 若部署顺序、库权限或表结构归属不满足,启动阶段直接 fail fast,避免第一次 trigger 才反复重试。
|
||||
func ensureRuntimeDependencyTables(db *gorm.DB) error {
|
||||
if db == nil {
|
||||
return fmt.Errorf("active-scheduler runtime dependency check failed: db is nil")
|
||||
}
|
||||
for _, table := range activeSchedulerRuntimeDependencyTables() {
|
||||
if err := ensureTableExists(db, table); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureTableExists 只做存在性探测。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 不负责判断字段是否兼容,字段级契约由拥有该表的服务迁移脚本保证;
|
||||
// 2. 不负责自动修复缺失表,避免 active-scheduler 越权创建其它服务的数据模型;
|
||||
// 3. 返回错误会阻止服务启动,让部署问题尽早显现。
|
||||
func ensureTableExists(db *gorm.DB, table runtimeDependencyTable) error {
|
||||
if table.Name == "" {
|
||||
return fmt.Errorf("active-scheduler runtime dependency table name is empty: %s", table.Reason)
|
||||
}
|
||||
if db.Migrator().HasTable(table.Name) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("active-scheduler runtime dependency table missing: %s (%s)", table.Name, table.Reason)
|
||||
}
|
||||
|
||||
// activeSchedulerRuntimeDependencyTables 列出迁移期运行仍需共享主库访问的外部表。
|
||||
//
|
||||
// 说明:
|
||||
// 1. active-scheduler 自有表在 OpenDBFromConfig 内迁移,这里只放跨域依赖;
|
||||
// 2. notification outbox 表名来自 service catalog,避免和 outbox 多表路由配置漂移;
|
||||
// 3. 后续切到 task/schedule/agent/notification RPC 或 read model 后,应从这里移除对应表依赖。
|
||||
func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
|
||||
notificationOutboxTable := "notification_outbox_messages"
|
||||
if cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceNotification); ok && cfg.TableName != "" {
|
||||
notificationOutboxTable = cfg.TableName
|
||||
}
|
||||
|
||||
return []runtimeDependencyTable{
|
||||
{Name: "tasks", Reason: "dry-run 读取 task_pool 事实,confirm 时锁定 task_pool 目标"},
|
||||
{Name: "schedule_events", Reason: "dry-run 读取日程事实,confirm 时写入正式日程事件"},
|
||||
{Name: "schedules", Reason: "dry-run 读取节次占用,confirm 时写入正式节次"},
|
||||
{Name: "task_classes", Reason: "confirm create_makeup 时校验 task_item 归属"},
|
||||
{Name: "task_items", Reason: "confirm create_makeup 时锁定 task_item 目标"},
|
||||
{Name: "agent_chats", Reason: "trigger 生成 preview 后预建主动调度会话"},
|
||||
{Name: "chat_histories", Reason: "trigger 生成 preview 后写入会话首屏消息"},
|
||||
{Name: "agent_timeline_events", Reason: "trigger 生成 preview 后写入主动调度时间线卡片"},
|
||||
{Name: notificationOutboxTable, Reason: "ShouldNotify=true 时投递 notification.feishu.requested 事件"},
|
||||
}
|
||||
}
|
||||
53
backend/services/active_scheduler/rpc/active_scheduler.proto
Normal file
53
backend/services/active_scheduler/rpc/active_scheduler.proto
Normal file
@@ -0,0 +1,53 @@
|
||||
syntax = "proto3";

package smartflow.active_scheduler;

option go_package = "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc/pb";

// ActiveScheduler exposes the dry-run / trigger / preview / confirm
// capabilities of the active-scheduler service over RPC.
service ActiveScheduler {
  rpc DryRun(ActiveScheduleRequest) returns (JSONResponse);
  rpc Trigger(ActiveScheduleRequest) returns (TriggerResponse);
  rpc CreatePreview(ActiveScheduleRequest) returns (JSONResponse);
  rpc GetPreview(GetPreviewRequest) returns (JSONResponse);
  rpc ConfirmPreview(ConfirmPreviewRequest) returns (JSONResponse);
}

// ActiveScheduleRequest is the shared input of DryRun, Trigger and
// CreatePreview.
message ActiveScheduleRequest {
  int64 user_id = 1;
  string trigger_type = 2;
  string target_type = 3;
  int64 target_id = 4;
  string feedback_id = 5;
  string idempotency_key = 6;
  // Mock "now" in Unix nanoseconds; zero means no mock time was supplied.
  int64 mock_now_unix_nano = 7;
  // Opaque JSON payload forwarded to the service unchanged.
  bytes payload_json = 8;
}

// GetPreviewRequest looks up one preview scoped to its owning user.
message GetPreviewRequest {
  int64 user_id = 1;
  string preview_id = 2;
}

// ConfirmPreviewRequest applies (or edits) a candidate from a preview.
message ConfirmPreviewRequest {
  int64 user_id = 1;
  string preview_id = 2;
  string candidate_id = 3;
  string action = 4;
  // Caller-edited changes as raw JSON; empty when the candidate is applied
  // as generated.
  bytes edited_changes_json = 5;
  string idempotency_key = 6;
  // Request time in Unix nanoseconds; zero means unset.
  int64 requested_at_unix_nano = 7;
  string trace_id = 8;
}

// JSONResponse carries a pre-serialized JSON document that the gateway
// decodes back into its HTTP response shape.
message JSONResponse {
  bytes data_json = 1;
}

// TriggerResponse is the structured result of Trigger.
message TriggerResponse {
  string trigger_id = 1;
  string status = 2;
  string preview_id = 3;
  // Distinguishes "no preview" from an empty preview_id, since proto3
  // scalar fields have no presence information.
  bool has_preview_id = 4;
  bool dedupe_hit = 5;
  string trace_id = 6;
}
|
||||
122
backend/services/active_scheduler/rpc/errors.go
Normal file
122
backend/services/active_scheduler/rpc/errors.go
Normal file
@@ -0,0 +1,122 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Error-info domains attached to gRPC status details so the gateway can tell
// generic active-scheduler errors apart from confirm/apply business errors.
const (
	activeSchedulerErrorDomain      = "smartflow.active_scheduler"
	activeSchedulerApplyErrorDomain = "smartflow.active_scheduler.apply"
)
|
||||
|
||||
// grpcErrorFromServiceError funnels active-scheduler internal errors into a
// single gRPC status.
//
// Responsibilities:
//  1. Apply business errors keep their error_code, so the gateway can
//     restore confirm/apply HTTP semantics;
//  2. respond.Response values keep travelling as in-project business codes;
//  3. Unclassified errors expose only a generic internal error — the details
//     stay in the service log.
func grpcErrorFromServiceError(err error) error {
	if err == nil {
		return nil
	}
	// Apply errors are checked first so their error_code survives even when
	// wrapped.
	if applyErr, ok := activeapply.AsApplyError(err); ok {
		return grpcErrorFromApplyError(applyErr)
	}

	var resp respond.Response
	if errors.As(err, &resp) {
		return grpcErrorFromResponse(resp)
	}

	log.Printf("active-scheduler rpc internal error: %v", err)
	return status.Error(codes.Internal, "active-scheduler service internal error")
}
|
||||
|
||||
func grpcErrorFromApplyError(applyErr *activeapply.ApplyError) error {
|
||||
if applyErr == nil {
|
||||
return status.Error(codes.Internal, "active-scheduler apply error")
|
||||
}
|
||||
message := strings.TrimSpace(applyErr.Message)
|
||||
if message == "" {
|
||||
message = string(applyErr.Code)
|
||||
}
|
||||
st := status.New(grpcCodeFromApplyErrorCode(applyErr.Code), message)
|
||||
detail := &errdetails.ErrorInfo{
|
||||
Domain: activeSchedulerApplyErrorDomain,
|
||||
Reason: string(applyErr.Code),
|
||||
Metadata: map[string]string{
|
||||
"info": message,
|
||||
},
|
||||
}
|
||||
withDetails, err := st.WithDetails(detail)
|
||||
if err != nil {
|
||||
return st.Err()
|
||||
}
|
||||
return withDetails.Err()
|
||||
}
|
||||
|
||||
func grpcErrorFromResponse(resp respond.Response) error {
|
||||
code := grpcCodeFromRespondStatus(resp.Status)
|
||||
message := strings.TrimSpace(resp.Info)
|
||||
if message == "" {
|
||||
message = strings.TrimSpace(resp.Status)
|
||||
}
|
||||
st := status.New(code, message)
|
||||
detail := &errdetails.ErrorInfo{
|
||||
Domain: activeSchedulerErrorDomain,
|
||||
Reason: resp.Status,
|
||||
Metadata: map[string]string{
|
||||
"info": resp.Info,
|
||||
},
|
||||
}
|
||||
withDetails, err := st.WithDetails(detail)
|
||||
if err != nil {
|
||||
return st.Err()
|
||||
}
|
||||
return withDetails.Err()
|
||||
}
|
||||
|
||||
// grpcCodeFromApplyErrorCode maps confirm/apply business error codes to gRPC
// status codes so the gateway can restore the right HTTP semantics.
func grpcCodeFromApplyErrorCode(code activeapply.ErrorCode) codes.Code {
	switch contracts.ApplyErrorCode(code) {
	case contracts.ApplyErrorCodeForbidden:
		return codes.PermissionDenied
	case contracts.ApplyErrorCodeTargetNotFound:
		return codes.NotFound
	case contracts.ApplyErrorCodeDBError:
		return codes.Internal
	case contracts.ApplyErrorCodeExpired,
		contracts.ApplyErrorCodeIdempotencyConflict,
		contracts.ApplyErrorCodeBaseVersionChanged,
		contracts.ApplyErrorCodeTargetCompleted,
		contracts.ApplyErrorCodeTargetAlreadySchedule,
		contracts.ApplyErrorCodeSlotConflict,
		contracts.ApplyErrorCodeAlreadyApplied:
		// State preconditions the caller can resolve by refreshing and
		// retrying with current data.
		return codes.FailedPrecondition
	default:
		// Anything unrecognized is treated as a bad request.
		return codes.InvalidArgument
	}
}
|
||||
|
||||
func grpcCodeFromRespondStatus(statusValue string) codes.Code {
|
||||
switch strings.TrimSpace(statusValue) {
|
||||
case respond.MissingToken.Status, respond.InvalidToken.Status, respond.InvalidClaims.Status,
|
||||
respond.ErrUnauthorized.Status, respond.WrongTokenType.Status, respond.UserLoggedOut.Status:
|
||||
return codes.Unauthenticated
|
||||
case respond.MissingParam.Status, respond.WrongParamType.Status, respond.ParamTooLong.Status:
|
||||
return codes.InvalidArgument
|
||||
}
|
||||
|
||||
if strings.HasPrefix(strings.TrimSpace(statusValue), "5") {
|
||||
return codes.Internal
|
||||
}
|
||||
return codes.InvalidArgument
|
||||
}
|
||||
155
backend/services/active_scheduler/rpc/handler.go
Normal file
155
backend/services/active_scheduler/rpc/handler.go
Normal file
@@ -0,0 +1,155 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc/pb"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
)
|
||||
|
||||
// Handler implements the ActiveScheduler gRPC server by delegating every RPC
// to the active-scheduler application service.
type Handler struct {
	pb.UnimplementedActiveSchedulerServer
	// svc is the application service all RPCs delegate to; every handler
	// guards against it being nil.
	svc *activeschedulersv.Service
}
|
||||
|
||||
// NewHandler wraps the active-scheduler application service in a gRPC
// handler; a nil svc is tolerated here and rejected per-RPC.
func NewHandler(svc *activeschedulersv.Service) *Handler {
	return &Handler{svc: svc}
}
|
||||
|
||||
// DryRun 负责把 gRPC 请求转换为主动调度 dry-run 服务调用。
|
||||
func (h *Handler) DryRun(ctx context.Context, req *pb.ActiveScheduleRequest) (*pb.JSONResponse, error) {
|
||||
if h == nil || h.svc == nil {
|
||||
return nil, grpcErrorFromServiceError(errors.New("active-scheduler service dependency not initialized"))
|
||||
}
|
||||
if req == nil {
|
||||
return nil, grpcErrorFromServiceError(respond.MissingParam)
|
||||
}
|
||||
data, err := h.svc.DryRun(ctx, activeScheduleRequestFromPB(req))
|
||||
if err != nil {
|
||||
return nil, grpcErrorFromServiceError(err)
|
||||
}
|
||||
return jsonResponse(data), nil
|
||||
}
|
||||
|
||||
func (h *Handler) Trigger(ctx context.Context, req *pb.ActiveScheduleRequest) (*pb.TriggerResponse, error) {
|
||||
if h == nil || h.svc == nil {
|
||||
return nil, grpcErrorFromServiceError(errors.New("active-scheduler service dependency not initialized"))
|
||||
}
|
||||
if req == nil {
|
||||
return nil, grpcErrorFromServiceError(respond.MissingParam)
|
||||
}
|
||||
resp, err := h.svc.Trigger(ctx, activeScheduleRequestFromPB(req))
|
||||
if err != nil {
|
||||
return nil, grpcErrorFromServiceError(err)
|
||||
}
|
||||
return triggerResponseToPB(resp), nil
|
||||
}
|
||||
|
||||
func (h *Handler) CreatePreview(ctx context.Context, req *pb.ActiveScheduleRequest) (*pb.JSONResponse, error) {
|
||||
if h == nil || h.svc == nil {
|
||||
return nil, grpcErrorFromServiceError(errors.New("active-scheduler service dependency not initialized"))
|
||||
}
|
||||
if req == nil {
|
||||
return nil, grpcErrorFromServiceError(respond.MissingParam)
|
||||
}
|
||||
data, err := h.svc.CreatePreview(ctx, activeScheduleRequestFromPB(req))
|
||||
if err != nil {
|
||||
return nil, grpcErrorFromServiceError(err)
|
||||
}
|
||||
return jsonResponse(data), nil
|
||||
}
|
||||
|
||||
func (h *Handler) GetPreview(ctx context.Context, req *pb.GetPreviewRequest) (*pb.JSONResponse, error) {
|
||||
if h == nil || h.svc == nil {
|
||||
return nil, grpcErrorFromServiceError(errors.New("active-scheduler service dependency not initialized"))
|
||||
}
|
||||
if req == nil {
|
||||
return nil, grpcErrorFromServiceError(respond.MissingParam)
|
||||
}
|
||||
data, err := h.svc.GetPreview(ctx, contracts.GetPreviewRequest{
|
||||
UserID: int(req.UserId),
|
||||
PreviewID: req.PreviewId,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, grpcErrorFromServiceError(err)
|
||||
}
|
||||
return jsonResponse(data), nil
|
||||
}
|
||||
|
||||
func (h *Handler) ConfirmPreview(ctx context.Context, req *pb.ConfirmPreviewRequest) (*pb.JSONResponse, error) {
|
||||
if h == nil || h.svc == nil {
|
||||
return nil, grpcErrorFromServiceError(errors.New("active-scheduler service dependency not initialized"))
|
||||
}
|
||||
if req == nil {
|
||||
return nil, grpcErrorFromServiceError(respond.MissingParam)
|
||||
}
|
||||
data, err := h.svc.ConfirmPreview(ctx, confirmRequestFromPB(req))
|
||||
if err != nil {
|
||||
return nil, grpcErrorFromServiceError(err)
|
||||
}
|
||||
return jsonResponse(data), nil
|
||||
}
|
||||
|
||||
func activeScheduleRequestFromPB(req *pb.ActiveScheduleRequest) contracts.ActiveScheduleRequest {
|
||||
var mockNow *time.Time
|
||||
if req.MockNowUnixNano > 0 {
|
||||
value := time.Unix(0, req.MockNowUnixNano)
|
||||
mockNow = &value
|
||||
}
|
||||
return contracts.ActiveScheduleRequest{
|
||||
UserID: int(req.UserId),
|
||||
TriggerType: req.TriggerType,
|
||||
TargetType: req.TargetType,
|
||||
TargetID: int(req.TargetId),
|
||||
FeedbackID: req.FeedbackId,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
MockNow: mockNow,
|
||||
Payload: json.RawMessage(req.PayloadJson),
|
||||
}
|
||||
}
|
||||
|
||||
func confirmRequestFromPB(req *pb.ConfirmPreviewRequest) contracts.ConfirmPreviewRequest {
|
||||
requestedAt := time.Time{}
|
||||
if req.RequestedAtUnixNano > 0 {
|
||||
requestedAt = time.Unix(0, req.RequestedAtUnixNano)
|
||||
}
|
||||
return contracts.ConfirmPreviewRequest{
|
||||
UserID: int(req.UserId),
|
||||
PreviewID: req.PreviewId,
|
||||
CandidateID: req.CandidateId,
|
||||
Action: req.Action,
|
||||
EditedChanges: json.RawMessage(req.EditedChangesJson),
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
RequestedAt: requestedAt,
|
||||
TraceID: req.TraceId,
|
||||
}
|
||||
}
|
||||
|
||||
func triggerResponseToPB(resp *contracts.TriggerResponse) *pb.TriggerResponse {
|
||||
if resp == nil {
|
||||
return &pb.TriggerResponse{}
|
||||
}
|
||||
previewID := ""
|
||||
hasPreviewID := false
|
||||
if resp.PreviewID != nil {
|
||||
previewID = *resp.PreviewID
|
||||
hasPreviewID = previewID != ""
|
||||
}
|
||||
return &pb.TriggerResponse{
|
||||
TriggerId: resp.TriggerID,
|
||||
Status: resp.Status,
|
||||
PreviewId: previewID,
|
||||
HasPreviewId: hasPreviewID,
|
||||
DedupeHit: resp.DedupeHit,
|
||||
TraceId: resp.TraceID,
|
||||
}
|
||||
}
|
||||
|
||||
func jsonResponse(data json.RawMessage) *pb.JSONResponse {
|
||||
return &pb.JSONResponse{DataJson: []byte(data)}
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
package pb
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
|
||||
// Keep a reference to the proto runtime so the import is retained even when
// no generated marshalling code uses it directly.
var _ = proto.Marshal

// Assert compatibility with the proto3 generation of the legacy runtime API.
const _ = proto.ProtoPackageIsVersion3
|
||||
|
||||
// ActiveScheduleRequest is the wire form shared by the DryRun, Trigger and
// CreatePreview RPCs. mock_now_unix_nano == 0 means "no mocked clock";
// payload_json carries an opaque JSON document.
type ActiveScheduleRequest struct {
	UserId          int64  `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	TriggerType     string `protobuf:"bytes,2,opt,name=trigger_type,json=triggerType,proto3" json:"trigger_type,omitempty"`
	TargetType      string `protobuf:"bytes,3,opt,name=target_type,json=targetType,proto3" json:"target_type,omitempty"`
	TargetId        int64  `protobuf:"varint,4,opt,name=target_id,json=targetId,proto3" json:"target_id,omitempty"`
	FeedbackId      string `protobuf:"bytes,5,opt,name=feedback_id,json=feedbackId,proto3" json:"feedback_id,omitempty"`
	IdempotencyKey  string `protobuf:"bytes,6,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"`
	MockNowUnixNano int64  `protobuf:"varint,7,opt,name=mock_now_unix_nano,json=mockNowUnixNano,proto3" json:"mock_now_unix_nano,omitempty"`
	PayloadJson     []byte `protobuf:"bytes,8,opt,name=payload_json,json=payloadJson,proto3" json:"payload_json,omitempty"`
	// Standard legacy-protobuf bookkeeping fields (excluded from JSON).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ActiveScheduleRequest) Reset() { *m = ActiveScheduleRequest{} }

// String renders the message in proto compact text form.
func (m *ActiveScheduleRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*ActiveScheduleRequest) ProtoMessage() {}
|
||||
|
||||
// GetPreviewRequest identifies one user's preview for the GetPreview RPC.
type GetPreviewRequest struct {
	UserId    int64  `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	PreviewId string `protobuf:"bytes,2,opt,name=preview_id,json=previewId,proto3" json:"preview_id,omitempty"`
	// Standard legacy-protobuf bookkeeping fields (excluded from JSON).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *GetPreviewRequest) Reset() { *m = GetPreviewRequest{} }

// String renders the message in proto compact text form.
func (m *GetPreviewRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*GetPreviewRequest) ProtoMessage() {}
|
||||
|
||||
// ConfirmPreviewRequest is the wire form of the ConfirmPreview RPC.
// requested_at_unix_nano == 0 means "use server time"; edited_changes_json
// carries an optional JSON array of edits.
type ConfirmPreviewRequest struct {
	UserId              int64  `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	PreviewId           string `protobuf:"bytes,2,opt,name=preview_id,json=previewId,proto3" json:"preview_id,omitempty"`
	CandidateId         string `protobuf:"bytes,3,opt,name=candidate_id,json=candidateId,proto3" json:"candidate_id,omitempty"`
	Action              string `protobuf:"bytes,4,opt,name=action,proto3" json:"action,omitempty"`
	EditedChangesJson   []byte `protobuf:"bytes,5,opt,name=edited_changes_json,json=editedChangesJson,proto3" json:"edited_changes_json,omitempty"`
	IdempotencyKey      string `protobuf:"bytes,6,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"`
	RequestedAtUnixNano int64  `protobuf:"varint,7,opt,name=requested_at_unix_nano,json=requestedAtUnixNano,proto3" json:"requested_at_unix_nano,omitempty"`
	TraceId             string `protobuf:"bytes,8,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	// Standard legacy-protobuf bookkeeping fields (excluded from JSON).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ConfirmPreviewRequest) Reset() { *m = ConfirmPreviewRequest{} }

// String renders the message in proto compact text form.
func (m *ConfirmPreviewRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*ConfirmPreviewRequest) ProtoMessage() {}
|
||||
|
||||
// JSONResponse is a generic envelope whose data_json field carries an
// already-serialized JSON document, letting several RPCs share one response type.
type JSONResponse struct {
	DataJson []byte `protobuf:"bytes,1,opt,name=data_json,json=dataJson,proto3" json:"data_json,omitempty"`
	// Standard legacy-protobuf bookkeeping fields (excluded from JSON).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *JSONResponse) Reset() { *m = JSONResponse{} }

// String renders the message in proto compact text form.
func (m *JSONResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*JSONResponse) ProtoMessage() {}
|
||||
|
||||
// TriggerResponse is the wire form of the Trigger RPC result.
// has_preview_id disambiguates "no preview" from an empty preview_id string.
type TriggerResponse struct {
	TriggerId    string `protobuf:"bytes,1,opt,name=trigger_id,json=triggerId,proto3" json:"trigger_id,omitempty"`
	Status       string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	PreviewId    string `protobuf:"bytes,3,opt,name=preview_id,json=previewId,proto3" json:"preview_id,omitempty"`
	HasPreviewId bool   `protobuf:"varint,4,opt,name=has_preview_id,json=hasPreviewId,proto3" json:"has_preview_id,omitempty"`
	DedupeHit    bool   `protobuf:"varint,5,opt,name=dedupe_hit,json=dedupeHit,proto3" json:"dedupe_hit,omitempty"`
	TraceId      string `protobuf:"bytes,6,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
	// Standard legacy-protobuf bookkeeping fields (excluded from JSON).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *TriggerResponse) Reset() { *m = TriggerResponse{} }

// String renders the message in proto compact text form.
func (m *TriggerResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a protobuf message.
func (*TriggerResponse) ProtoMessage() {}
|
||||
@@ -0,0 +1,201 @@
|
||||
package pb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// Fully-qualified gRPC method names of the ActiveScheduler service, used both
// by the client Invoke calls and by the server-side interceptor info.
const (
	ActiveScheduler_DryRun_FullMethodName         = "/smartflow.active_scheduler.ActiveScheduler/DryRun"
	ActiveScheduler_Trigger_FullMethodName        = "/smartflow.active_scheduler.ActiveScheduler/Trigger"
	ActiveScheduler_CreatePreview_FullMethodName  = "/smartflow.active_scheduler.ActiveScheduler/CreatePreview"
	ActiveScheduler_GetPreview_FullMethodName     = "/smartflow.active_scheduler.ActiveScheduler/GetPreview"
	ActiveScheduler_ConfirmPreview_FullMethodName = "/smartflow.active_scheduler.ActiveScheduler/ConfirmPreview"
)
|
||||
|
||||
// ActiveSchedulerClient is the client-side API of the ActiveScheduler gRPC
// service: dry-run, formal trigger, and the preview create/get/confirm trio.
type ActiveSchedulerClient interface {
	DryRun(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	Trigger(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*TriggerResponse, error)
	CreatePreview(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetPreview(ctx context.Context, in *GetPreviewRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	ConfirmPreview(ctx context.Context, in *ConfirmPreviewRequest, opts ...grpc.CallOption) (*JSONResponse, error)
}
|
||||
|
||||
// activeSchedulerClient is the concrete ActiveSchedulerClient backed by a
// gRPC connection; each method is a plain unary Invoke on the shared cc.
type activeSchedulerClient struct {
	cc grpc.ClientConnInterface
}

// NewActiveSchedulerClient wraps an established gRPC connection in the typed client.
func NewActiveSchedulerClient(cc grpc.ClientConnInterface) ActiveSchedulerClient {
	return &activeSchedulerClient{cc}
}

// DryRun issues the unary DryRun RPC and decodes the JSON envelope response.
func (c *activeSchedulerClient) DryRun(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	out := new(JSONResponse)
	err := c.cc.Invoke(ctx, ActiveScheduler_DryRun_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// Trigger issues the unary Trigger RPC.
func (c *activeSchedulerClient) Trigger(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*TriggerResponse, error) {
	out := new(TriggerResponse)
	err := c.cc.Invoke(ctx, ActiveScheduler_Trigger_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// CreatePreview issues the unary CreatePreview RPC and decodes the JSON envelope response.
func (c *activeSchedulerClient) CreatePreview(ctx context.Context, in *ActiveScheduleRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	out := new(JSONResponse)
	err := c.cc.Invoke(ctx, ActiveScheduler_CreatePreview_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// GetPreview issues the unary GetPreview RPC and decodes the JSON envelope response.
func (c *activeSchedulerClient) GetPreview(ctx context.Context, in *GetPreviewRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	out := new(JSONResponse)
	err := c.cc.Invoke(ctx, ActiveScheduler_GetPreview_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ConfirmPreview issues the unary ConfirmPreview RPC and decodes the JSON envelope response.
func (c *activeSchedulerClient) ConfirmPreview(ctx context.Context, in *ConfirmPreviewRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
	out := new(JSONResponse)
	err := c.cc.Invoke(ctx, ActiveScheduler_ConfirmPreview_FullMethodName, in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// ActiveSchedulerServer is the server-side contract implemented by the RPC
// handler; it mirrors ActiveSchedulerClient method-for-method.
type ActiveSchedulerServer interface {
	DryRun(context.Context, *ActiveScheduleRequest) (*JSONResponse, error)
	Trigger(context.Context, *ActiveScheduleRequest) (*TriggerResponse, error)
	CreatePreview(context.Context, *ActiveScheduleRequest) (*JSONResponse, error)
	GetPreview(context.Context, *GetPreviewRequest) (*JSONResponse, error)
	ConfirmPreview(context.Context, *ConfirmPreviewRequest) (*JSONResponse, error)
}
|
||||
|
||||
// UnimplementedActiveSchedulerServer provides forward-compatible stubs: every
// method fails with codes.Unimplemented, so embedders only override what they serve.
type UnimplementedActiveSchedulerServer struct{}

func (UnimplementedActiveSchedulerServer) DryRun(context.Context, *ActiveScheduleRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DryRun not implemented")
}

func (UnimplementedActiveSchedulerServer) Trigger(context.Context, *ActiveScheduleRequest) (*TriggerResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Trigger not implemented")
}

func (UnimplementedActiveSchedulerServer) CreatePreview(context.Context, *ActiveScheduleRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreatePreview not implemented")
}

func (UnimplementedActiveSchedulerServer) GetPreview(context.Context, *GetPreviewRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetPreview not implemented")
}

func (UnimplementedActiveSchedulerServer) ConfirmPreview(context.Context, *ConfirmPreviewRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ConfirmPreview not implemented")
}
|
||||
|
||||
// RegisterActiveSchedulerServer attaches srv to the gRPC registrar using the
// shared service descriptor.
func RegisterActiveSchedulerServer(s grpc.ServiceRegistrar, srv ActiveSchedulerServer) {
	s.RegisterService(&ActiveScheduler_ServiceDesc, srv)
}
|
||||
|
||||
// _ActiveScheduler_DryRun_Handler decodes the request and dispatches DryRun,
// routing through the configured unary interceptor when one is installed.
func _ActiveScheduler_DryRun_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ActiveScheduleRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ActiveSchedulerServer).DryRun(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: ActiveScheduler_DryRun_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ActiveSchedulerServer).DryRun(ctx, req.(*ActiveScheduleRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ActiveScheduler_Trigger_Handler decodes the request and dispatches Trigger,
// routing through the configured unary interceptor when one is installed.
func _ActiveScheduler_Trigger_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ActiveScheduleRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ActiveSchedulerServer).Trigger(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: ActiveScheduler_Trigger_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ActiveSchedulerServer).Trigger(ctx, req.(*ActiveScheduleRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ActiveScheduler_CreatePreview_Handler decodes the request and dispatches
// CreatePreview, routing through the configured unary interceptor when one is installed.
func _ActiveScheduler_CreatePreview_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ActiveScheduleRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ActiveSchedulerServer).CreatePreview(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: ActiveScheduler_CreatePreview_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ActiveSchedulerServer).CreatePreview(ctx, req.(*ActiveScheduleRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ActiveScheduler_GetPreview_Handler decodes the request and dispatches
// GetPreview, routing through the configured unary interceptor when one is installed.
func _ActiveScheduler_GetPreview_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetPreviewRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ActiveSchedulerServer).GetPreview(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: ActiveScheduler_GetPreview_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ActiveSchedulerServer).GetPreview(ctx, req.(*GetPreviewRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// _ActiveScheduler_ConfirmPreview_Handler decodes the request and dispatches
// ConfirmPreview, routing through the configured unary interceptor when one is installed.
func _ActiveScheduler_ConfirmPreview_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ConfirmPreviewRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ActiveSchedulerServer).ConfirmPreview(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: ActiveScheduler_ConfirmPreview_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ActiveSchedulerServer).ConfirmPreview(ctx, req.(*ConfirmPreviewRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||
|
||||
// ActiveScheduler_ServiceDesc wires each unary method to its dispatch handler;
// the service exposes no streaming methods.
var ActiveScheduler_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "smartflow.active_scheduler.ActiveScheduler",
	HandlerType: (*ActiveSchedulerServer)(nil),
	Methods: []grpc.MethodDesc{
		{MethodName: "DryRun", Handler: _ActiveScheduler_DryRun_Handler},
		{MethodName: "Trigger", Handler: _ActiveScheduler_Trigger_Handler},
		{MethodName: "CreatePreview", Handler: _ActiveScheduler_CreatePreview_Handler},
		{MethodName: "GetPreview", Handler: _ActiveScheduler_GetPreview_Handler},
		{MethodName: "ConfirmPreview", Handler: _ActiveScheduler_ConfirmPreview_Handler},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "services/active_scheduler/rpc/active_scheduler.proto",
}
|
||||
60
backend/services/active_scheduler/rpc/server.go
Normal file
60
backend/services/active_scheduler/rpc/server.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package rpc
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc/pb"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
"github.com/zeromicro/go-zero/core/service"
|
||||
"github.com/zeromicro/go-zero/zrpc"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
// Defaults applied when ServerOptions leaves the corresponding field unset.
const (
	defaultListenOn = "0.0.0.0:9083"
	defaultTimeout  = 8 * time.Second
)
|
||||
|
||||
// ServerOptions configures the active-scheduler zrpc server.
type ServerOptions struct {
	ListenOn string                    // listen address; defaultListenOn when blank
	Timeout  time.Duration             // per-request timeout; defaultTimeout when non-positive
	Service  *activeschedulersv.Service // required service facade backing the RPC handlers
}
|
||||
|
||||
// NewServer 创建 active-scheduler zrpc 服务端。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责 zrpc server 配置与 gRPC handler 注册;
|
||||
// 2. 不创建数据库、LLM、outbox 或 worker,它们由 cmd/active-scheduler 管理;
|
||||
// 3. 返回 listenOn 供进程入口打印启动日志。
|
||||
func NewServer(opts ServerOptions) (*zrpc.RpcServer, string, error) {
|
||||
if opts.Service == nil {
|
||||
return nil, "", errors.New("active-scheduler service dependency not initialized")
|
||||
}
|
||||
|
||||
listenOn := strings.TrimSpace(opts.ListenOn)
|
||||
if listenOn == "" {
|
||||
listenOn = defaultListenOn
|
||||
}
|
||||
timeout := opts.Timeout
|
||||
if timeout <= 0 {
|
||||
timeout = defaultTimeout
|
||||
}
|
||||
|
||||
server, err := zrpc.NewServer(zrpc.RpcServerConf{
|
||||
ServiceConf: service.ServiceConf{
|
||||
Name: "active-scheduler.rpc",
|
||||
Mode: service.DevMode,
|
||||
},
|
||||
ListenOn: listenOn,
|
||||
Timeout: int64(timeout / time.Millisecond),
|
||||
}, func(grpcServer *grpc.Server) {
|
||||
pb.RegisterActiveSchedulerServer(grpcServer, NewHandler(opts.Service))
|
||||
})
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return server, listenOn, nil
|
||||
}
|
||||
345
backend/services/active_scheduler/sv/service.go
Normal file
345
backend/services/active_scheduler/sv/service.go
Normal file
@@ -0,0 +1,345 @@
|
||||
package sv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activejob "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/job"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
activesel "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// defaultJobScanLimit bounds how many due jobs one scanner pass picks up.
const defaultJobScanLimit = 50
|
||||
|
||||
// Options describes the startup parameters of the standalone active-scheduler service.
//
// Scope:
//  1. carries only in-service worker cadence and outbox configuration;
//  2. carries no database connections, model configuration, or HTTP/gateway config;
//  3. zero values fall back to safe defaults so a local smoke run works out of the box.
type Options struct {
	JobScanEvery time.Duration   // interval between due-job scans
	JobScanLimit int             // max jobs per scan; defaultJobScanLimit when non-positive
	KafkaConfig  kafkabus.Config // Kafka settings for the outbox relay/consumer
}
|
||||
|
||||
// Service is the in-process facade of the standalone active-scheduler service.
//
// Scope:
//  1. exposes dry-run / trigger / preview / confirm to the RPC layer;
//  2. exposes the outbox consumer and due-job scanner lifecycle to the cmd layer;
//  3. reuses the domain core under services/active_scheduler/core so the service
//     entry and algorithm implementations do not drift back into the legacy root tree.
type Service struct {
	dryRun         *activesvc.DryRunService
	trigger        *activesvc.TriggerService
	previewConfirm *activesvc.PreviewConfirmService
	eventBus       *outboxinfra.EventBus // nil when Kafka is disabled
	jobScanner     *activejob.Scanner    // nil when Kafka is disabled
}
|
||||
|
||||
// New constructs the active-scheduler service runtime.
//
// Steps:
//  1. assemble the scheduler-owned DAO, read-only readers, dry-run and preview/confirm services;
//  2. register outbox routing and the active_schedule.triggered handler under this service's ownership;
//  3. create the due-job scanner so the worker can turn active_schedule_jobs rows into formal triggers;
//  4. when Kafka is off (nil event bus), dry-run / preview / confirm stay available while
//     synchronous trigger returns an explicit error.
func New(db *gorm.DB, llmService *llmservice.Service, opts Options) (*Service, error) {
	if db == nil {
		return nil, errors.New("active-scheduler database 未初始化")
	}

	// Step 1: DAO, read-model adapters, and the synchronous services built on them.
	activeDAO := rootdao.NewActiveScheduleDAO(db)
	activeReaders := activeadapters.NewGormReaders(db)
	readers := activeadapters.ReadersFromGorm(activeReaders)
	dryRun, err := activesvc.NewDryRunService(readers)
	if err != nil {
		return nil, err
	}
	previewConfirm, err := buildPreviewConfirmService(db, activeDAO, dryRun)
	if err != nil {
		return nil, err
	}

	// Step 2: outbox repository + event bus; the trigger service publishes through it.
	outboxRepo := outboxinfra.NewRepository(db)
	eventBus, err := buildActiveSchedulerEventBus(outboxRepo, opts.KafkaConfig)
	if err != nil {
		return nil, err
	}
	triggerService, err := activesvc.NewTriggerService(activeDAO, eventBus)
	if err != nil {
		return nil, err
	}

	// Step 3: workflow + scanner only exist when the event bus is available.
	var jobScanner *activejob.Scanner
	if eventBus != nil {
		graphRunner, err := buildGraphRunner(dryRun, llmService)
		if err != nil {
			return nil, err
		}
		workflow, err := activesvc.NewTriggerWorkflowServiceWithOptions(
			activeDAO,
			graphRunner,
			outboxRepo,
			opts.KafkaConfig,
			activesvc.WithActiveScheduleSessionBridge(rootdao.NewAgentDAO(db), rootdao.NewActiveScheduleSessionDAO(db)),
		)
		if err != nil {
			return nil, err
		}
		if err := registerActiveSchedulerOutboxHandler(eventBus, outboxRepo, workflow); err != nil {
			return nil, err
		}
		jobScanner, err = activejob.NewScanner(activeDAO, readers, triggerService, activejob.ScannerOptions{
			ScanEvery: opts.JobScanEvery,
			Limit:     normalizeJobScanLimit(opts.JobScanLimit),
		})
		if err != nil {
			return nil, err
		}
	}

	return &Service{
		dryRun:         dryRun,
		trigger:        triggerService,
		previewConfirm: previewConfirm,
		eventBus:       eventBus,
		jobScanner:     jobScanner,
	}, nil
}
|
||||
|
||||
// StartWorkers 启动 active-scheduler 自己的 outbox relay/consumer 和 due job scanner。
|
||||
func (s *Service) StartWorkers(ctx context.Context) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
if s.eventBus != nil {
|
||||
s.eventBus.Start(ctx)
|
||||
}
|
||||
if s.jobScanner != nil {
|
||||
s.jobScanner.Start(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
// Close 关闭 active-scheduler 持有的 Kafka 资源。
|
||||
func (s *Service) Close() {
|
||||
if s != nil && s.eventBus != nil {
|
||||
s.eventBus.Close()
|
||||
}
|
||||
}
|
||||
|
||||
// DryRun 同步执行主动调度诊断,并以 JSON 形式返回现有响应结构。
|
||||
func (s *Service) DryRun(ctx context.Context, req contracts.ActiveScheduleRequest) (json.RawMessage, error) {
|
||||
if s == nil || s.dryRun == nil {
|
||||
return nil, errors.New("active-scheduler dry-run service 未初始化")
|
||||
}
|
||||
trig := buildDryRunTrigger(req, time.Now())
|
||||
result, err := s.dryRun.DryRun(ctx, trig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return marshalResponseJSON(result)
|
||||
}
|
||||
|
||||
// Trigger 创建正式 trigger 并发布 active_schedule.triggered。
|
||||
func (s *Service) Trigger(ctx context.Context, req contracts.ActiveScheduleRequest) (*contracts.TriggerResponse, error) {
|
||||
if s == nil || s.trigger == nil {
|
||||
return nil, errors.New("active-scheduler trigger service 未初始化")
|
||||
}
|
||||
now := time.Now()
|
||||
resp, err := s.trigger.CreateAndPublish(ctx, activesvc.TriggerRequest{
|
||||
UserID: req.UserID,
|
||||
TriggerType: trigger.TriggerType(req.TriggerType),
|
||||
Source: trigger.SourceAPITrigger,
|
||||
TargetType: trigger.TargetType(req.TargetType),
|
||||
TargetID: req.TargetID,
|
||||
FeedbackID: req.FeedbackID,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
MockNow: req.MockNow,
|
||||
IsMockTime: req.MockNow != nil,
|
||||
RequestedAt: now,
|
||||
Payload: normalizePayload(req.Payload),
|
||||
TraceID: fmt.Sprintf("trace_api_trigger_%d_%d", req.UserID, now.UnixNano()),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &contracts.TriggerResponse{
|
||||
TriggerID: resp.TriggerID,
|
||||
Status: resp.Status,
|
||||
PreviewID: resp.PreviewID,
|
||||
DedupeHit: resp.DedupeHit,
|
||||
TraceID: resp.TraceID,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CreatePreview 同步 dry-run 后把 top1 候选固化为待确认预览。
|
||||
func (s *Service) CreatePreview(ctx context.Context, req contracts.ActiveScheduleRequest) (json.RawMessage, error) {
|
||||
if s == nil || s.dryRun == nil || s.previewConfirm == nil {
|
||||
return nil, errors.New("active-scheduler preview service 未初始化")
|
||||
}
|
||||
now := time.Now()
|
||||
trig := buildDryRunTrigger(req, now)
|
||||
trig.TriggerID = fmt.Sprintf("ast_api_%d_%d", req.UserID, now.UnixNano())
|
||||
trig.TraceID = fmt.Sprintf("trace_api_preview_%d_%d", req.UserID, now.UnixNano())
|
||||
|
||||
dryRunResult, err := s.dryRun.DryRun(ctx, trig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
previewResp, err := s.previewConfirm.CreatePreviewFromDryRun(ctx, activepreview.CreatePreviewRequest{
|
||||
ActiveContext: dryRunResult.Context,
|
||||
Observation: dryRunResult.Observation,
|
||||
Candidates: dryRunResult.Candidates,
|
||||
TriggerID: trig.TriggerID,
|
||||
GeneratedAt: now,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return marshalResponseJSON(previewResp.Detail)
|
||||
}
|
||||
|
||||
// GetPreview 查询主动调度预览详情。
|
||||
func (s *Service) GetPreview(ctx context.Context, req contracts.GetPreviewRequest) (json.RawMessage, error) {
|
||||
if s == nil || s.previewConfirm == nil {
|
||||
return nil, errors.New("active-scheduler preview service 未初始化")
|
||||
}
|
||||
detail, err := s.previewConfirm.GetPreview(ctx, req.UserID, req.PreviewID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return marshalResponseJSON(detail)
|
||||
}
|
||||
|
||||
// ConfirmPreview 同步确认并正式应用主动调度预览。
|
||||
func (s *Service) ConfirmPreview(ctx context.Context, req contracts.ConfirmPreviewRequest) (json.RawMessage, error) {
|
||||
if s == nil || s.previewConfirm == nil {
|
||||
return nil, errors.New("active-scheduler confirm service 未初始化")
|
||||
}
|
||||
editedChanges, err := decodeEditedChanges(req.EditedChanges)
|
||||
if err != nil {
|
||||
return nil, activeapply.NewApplyError(activeapply.ErrorCodeInvalidEditedChanges, "edited_changes 不是合法的变更数组", err)
|
||||
}
|
||||
requestedAt := req.RequestedAt
|
||||
if requestedAt.IsZero() {
|
||||
requestedAt = time.Now()
|
||||
}
|
||||
result, err := s.previewConfirm.ConfirmPreview(ctx, activeapply.ConfirmRequest{
|
||||
PreviewID: req.PreviewID,
|
||||
UserID: req.UserID,
|
||||
CandidateID: req.CandidateID,
|
||||
Action: activeapply.ConfirmAction(req.Action),
|
||||
EditedChanges: editedChanges,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
RequestedAt: requestedAt,
|
||||
TraceID: req.TraceID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return marshalResponseJSON(result)
|
||||
}
|
||||
|
||||
func buildPreviewConfirmService(db *gorm.DB, activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService) (*activesvc.PreviewConfirmService, error) {
|
||||
previewService, err := activepreview.NewService(activeDAO)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, applyadapter.NewGormApplyAdapter(db))
|
||||
}
|
||||
|
||||
func buildGraphRunner(dryRun *activesvc.DryRunService, llmService *llmservice.Service) (*activegraph.Runner, error) {
|
||||
var llmClient *llmservice.Client
|
||||
if llmService != nil {
|
||||
llmClient = llmService.ProClient()
|
||||
}
|
||||
return activegraph.NewRunner(dryRun.AsGraphDryRunFunc(), activesel.NewService(llmClient))
|
||||
}
|
||||
|
||||
func buildActiveSchedulerEventBus(outboxRepo *outboxinfra.Repository, kafkaCfg kafkabus.Config) (*outboxinfra.EventBus, error) {
|
||||
if outboxRepo == nil {
|
||||
return nil, errors.New("active-scheduler outbox repository 未初始化")
|
||||
}
|
||||
if err := outboxinfra.RegisterEventService(sharedevents.ActiveScheduleTriggeredEventType, outboxinfra.ServiceActiveScheduler); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventBus, err := outboxinfra.NewEventBus(outboxRepo, kafkaCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return eventBus, nil
|
||||
}
|
||||
|
||||
func registerActiveSchedulerOutboxHandler(eventBus *outboxinfra.EventBus, outboxRepo *outboxinfra.Repository, workflow eventsvc.ActiveScheduleTriggeredProcessor) error {
|
||||
if eventBus == nil {
|
||||
return nil
|
||||
}
|
||||
return eventsvc.RegisterActiveScheduleTriggeredHandler(eventBus, outboxRepo, workflow)
|
||||
}
|
||||
|
||||
func buildDryRunTrigger(req contracts.ActiveScheduleRequest, now time.Time) trigger.ActiveScheduleTrigger {
|
||||
return trigger.ActiveScheduleTrigger{
|
||||
UserID: req.UserID,
|
||||
TriggerType: trigger.TriggerType(req.TriggerType),
|
||||
Source: trigger.SourceAPIDryRun,
|
||||
TargetType: trigger.TargetType(req.TargetType),
|
||||
TargetID: req.TargetID,
|
||||
FeedbackID: req.FeedbackID,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
MockNow: req.MockNow,
|
||||
IsMockTime: req.MockNow != nil,
|
||||
RequestedAt: now,
|
||||
}
|
||||
}
|
||||
|
||||
func normalizePayload(raw json.RawMessage) json.RawMessage {
|
||||
if len(raw) == 0 || strings.TrimSpace(string(raw)) == "" || strings.TrimSpace(string(raw)) == "null" {
|
||||
return json.RawMessage("{}")
|
||||
}
|
||||
return raw
|
||||
}
|
||||
|
||||
func decodeEditedChanges(raw json.RawMessage) ([]activeapply.ApplyChange, error) {
|
||||
if len(raw) == 0 || strings.TrimSpace(string(raw)) == "" || strings.TrimSpace(string(raw)) == "null" {
|
||||
return nil, nil
|
||||
}
|
||||
var changes []activeapply.ApplyChange
|
||||
if err := json.Unmarshal(raw, &changes); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return changes, nil
|
||||
}
|
||||
|
||||
// marshalResponseJSON serializes value and hands the bytes back as a RawMessage.
func marshalResponseJSON(value any) (json.RawMessage, error) {
	encoded, err := json.Marshal(value)
	if err != nil {
		return nil, err
	}
	return json.RawMessage(encoded), nil
}
|
||||
|
||||
func normalizeJobScanLimit(limit int) int {
|
||||
if limit <= 0 {
|
||||
return defaultJobScanLimit
|
||||
}
|
||||
return limit
|
||||
}
|
||||
Reference in New Issue
Block a user