Version: 0.9.70.dev.260504
后端:
1. 阶段 5 schedule 首刀服务化落地,新增 `cmd/schedule`、`services/schedule/{dao,rpc,sv,core}`、`gateway/client/schedule`、`shared/contracts/schedule` 和 schedule port
2. gateway `/api/v1/schedule/*` 切到 schedule zrpc client,HTTP 门面只保留鉴权、参数绑定、超时和轻量转发
3. active-scheduler 的 schedule facts、feedback 和 confirm apply 改为调用 schedule RPC adapter,减少对 `schedule_events`、`schedules`、`task_classes`、`task_items` 的跨域 DB 依赖
4. 单体聊天主动调度 rerun 的 schedule 读写链路切到 schedule RPC,迁移期仅保留 task facts 直读 Gorm
5. 为 schedule zrpc 补充 `Ping` 启动健康检查,并在 gateway client 与 active-scheduler adapter 初始化时校验服务可用
6. `cmd/schedule` 独立初始化 DB / Redis,只 AutoMigrate schedule 自有表,并显式检查迁移期 task / task-class 依赖表
7. 更新 active-scheduler 依赖表检查和 preview confirm apply 抽象,保留旧 Gorm 实现作为迁移期回退路径
8. 补充 `schedule.rpc` 示例配置和 schedule HTTP RPC 超时配置
文档:
1. 更新微服务迁移计划,将阶段 5 schedule 首刀进展、当前切流点、旧实现保留范围和 active-scheduler DB 依赖收缩情况写入基线
This commit is contained in:
@@ -10,6 +10,7 @@ import (
|
|||||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||||
"github.com/LoveLosita/smartflow/backend/inits"
|
"github.com/LoveLosita/smartflow/backend/inits"
|
||||||
|
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||||
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
||||||
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
||||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||||
@@ -45,6 +46,11 @@ func main() {
|
|||||||
JobScanEvery: viper.GetDuration("activeScheduler.jobScanEvery"),
|
JobScanEvery: viper.GetDuration("activeScheduler.jobScanEvery"),
|
||||||
JobScanLimit: viper.GetInt("activeScheduler.jobScanLimit"),
|
JobScanLimit: viper.GetInt("activeScheduler.jobScanLimit"),
|
||||||
KafkaConfig: kafkabus.LoadConfig(),
|
KafkaConfig: kafkabus.LoadConfig(),
|
||||||
|
ScheduleRPC: activeadapters.ScheduleRPCConfig{
|
||||||
|
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||||
|
Target: viper.GetString("schedule.rpc.target"),
|
||||||
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||||
|
},
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("failed to initialize active-scheduler service: %v", err)
|
log.Fatalf("failed to initialize active-scheduler service: %v", err)
|
||||||
|
|||||||
68
backend/cmd/schedule/main.go
Normal file
68
backend/cmd/schedule/main.go
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"log"
|
||||||
|
"os"
|
||||||
|
"os/signal"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||||
|
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||||
|
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||||
|
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||||
|
schedulerpc "github.com/LoveLosita/smartflow/backend/services/schedule/rpc"
|
||||||
|
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
if err := bootstrap.LoadConfig(); err != nil {
|
||||||
|
log.Fatalf("failed to load config: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||||
|
defer stop()
|
||||||
|
|
||||||
|
db, err := scheduledao.OpenDBFromConfig()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to connect schedule database: %v", err)
|
||||||
|
}
|
||||||
|
redisClient, err := scheduledao.OpenRedisFromConfig()
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to connect schedule redis: %v", err)
|
||||||
|
}
|
||||||
|
defer redisClient.Close()
|
||||||
|
|
||||||
|
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||||
|
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||||
|
log.Fatalf("failed to initialize schedule cache deleter: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
svc := schedulesv.NewScheduleService(
|
||||||
|
scheduledao.NewScheduleDAO(db),
|
||||||
|
rootdao.NewTaskClassDAO(db),
|
||||||
|
rootdao.NewManager(db),
|
||||||
|
cacheRepo,
|
||||||
|
)
|
||||||
|
svc.SetApplyAdapter(applyadapter.NewGormApplyAdapter(db))
|
||||||
|
|
||||||
|
server, listenOn, err := schedulerpc.NewServer(schedulerpc.ServerOptions{
|
||||||
|
ListenOn: viper.GetString("schedule.rpc.listenOn"),
|
||||||
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||||
|
Service: svc,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to build schedule zrpc server: %v", err)
|
||||||
|
}
|
||||||
|
defer server.Stop()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
log.Printf("schedule zrpc service starting on %s", listenOn)
|
||||||
|
server.Start()
|
||||||
|
}()
|
||||||
|
|
||||||
|
<-ctx.Done()
|
||||||
|
log.Println("schedule service stopping")
|
||||||
|
}
|
||||||
@@ -16,6 +16,7 @@ import (
|
|||||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||||
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
|
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
|
||||||
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
|
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
|
||||||
|
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||||
gatewayrouter "github.com/LoveLosita/smartflow/backend/gateway/router"
|
gatewayrouter "github.com/LoveLosita/smartflow/backend/gateway/router"
|
||||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||||
@@ -36,7 +37,7 @@ import (
|
|||||||
agentsvcsvc "github.com/LoveLosita/smartflow/backend/service/agentsvc"
|
agentsvcsvc "github.com/LoveLosita/smartflow/backend/service/agentsvc"
|
||||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||||
@@ -221,6 +222,14 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to initialize notification zrpc client: %w", err)
|
return nil, fmt.Errorf("failed to initialize notification zrpc client: %w", err)
|
||||||
}
|
}
|
||||||
|
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||||
|
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||||
|
Target: viper.GetString("schedule.rpc.target"),
|
||||||
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to initialize schedule zrpc client: %w", err)
|
||||||
|
}
|
||||||
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
|
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
|
||||||
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
|
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
|
||||||
Target: viper.GetString("activeScheduler.rpc.target"),
|
Target: viper.GetString("activeScheduler.rpc.target"),
|
||||||
@@ -259,12 +268,22 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
|||||||
memoryCfg,
|
memoryCfg,
|
||||||
)
|
)
|
||||||
|
|
||||||
activeReaders := activeadapters.NewGormReaders(db)
|
// 1. 迁移期 task_pool 事实仍由单体 task 表读取,下一轮切 task 服务后替换为 task RPC;
|
||||||
activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersFromGorm(activeReaders))
|
// 2. schedule facts / feedback / apply 已统一走 schedule RPC,避免聊天 rerun 继续直连 schedule 表。
|
||||||
|
activeTaskReader := activeadapters.NewGormReaders(db)
|
||||||
|
activeScheduleAdapter, err := activeadapters.NewScheduleRPCAdapter(activeadapters.ScheduleRPCConfig{
|
||||||
|
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||||
|
Target: viper.GetString("schedule.rpc.target"),
|
||||||
|
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to initialize schedule rpc adapter for active-scheduler rerun: %w", err)
|
||||||
|
}
|
||||||
|
activeScheduleDryRun, err := activesvc.NewDryRunService(activeadapters.ReadersWithScheduleRPC(activeTaskReader, activeScheduleAdapter))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(db, manager.ActiveSchedule, activeScheduleDryRun)
|
activeSchedulePreviewConfirm, err := buildActiveSchedulePreviewConfirmService(manager.ActiveSchedule, activeScheduleDryRun, activeScheduleAdapter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -272,13 +291,13 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
|||||||
// 2. dry-run 与 selection 通过 graph runner 串起来,避免 trigger_pipeline 再拼第二套候选逻辑。
|
// 2. dry-run 与 selection 通过 graph runner 串起来,避免 trigger_pipeline 再拼第二套候选逻辑。
|
||||||
activeScheduleLLMClient := llmService.ProClient()
|
activeScheduleLLMClient := llmService.ProClient()
|
||||||
activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
|
activeScheduleSelector := activesel.NewService(activeScheduleLLMClient)
|
||||||
activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeReaders, activeScheduleLLMClient)
|
activeScheduleFeedbackLocator := activefeedbacklocate.NewService(activeScheduleAdapter, activeScheduleLLMClient)
|
||||||
activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
|
activeScheduleGraphRunner, err := activegraph.NewRunner(activeScheduleDryRun.AsGraphDryRunFunc(), activeScheduleSelector)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
|
agentService.SetActiveScheduleSessionRerunFunc(buildActiveScheduleSessionRerunFunc(manager.ActiveSchedule, activeScheduleGraphRunner, activeSchedulePreviewConfirm, activeScheduleFeedbackLocator))
|
||||||
handlers := buildAPIHandlers(taskSv, taskClassService, courseService, scheduleService, agentService, memoryModule, activeSchedulerClient, notificationClient)
|
handlers := buildAPIHandlers(taskSv, taskClassService, courseService, scheduleClient, agentService, memoryModule, activeSchedulerClient, notificationClient)
|
||||||
|
|
||||||
runtime := &appRuntime{
|
runtime := &appRuntime{
|
||||||
db: db,
|
db: db,
|
||||||
@@ -363,17 +382,14 @@ func buildCourseService(llmService *llmservice.Service, courseRepo *dao.CourseDA
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildActiveScheduleDryRunService(db *gorm.DB) (*activesvc.DryRunService, error) {
|
func buildActiveSchedulePreviewConfirmService(activeDAO *dao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||||
readers := activeadapters.NewGormReaders(db)
|
ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
|
||||||
return activesvc.NewDryRunService(activeadapters.ReadersFromGorm(readers))
|
}) (*activesvc.PreviewConfirmService, error) {
|
||||||
}
|
|
||||||
|
|
||||||
func buildActiveSchedulePreviewConfirmService(db *gorm.DB, activeDAO *dao.ActiveScheduleDAO, dryRun *activesvc.DryRunService) (*activesvc.PreviewConfirmService, error) {
|
|
||||||
previewService, err := activepreview.NewService(activeDAO)
|
previewService, err := activepreview.NewService(activeDAO)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, applyadapter.NewGormApplyAdapter(db))
|
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, scheduleApplyAdapter)
|
||||||
}
|
}
|
||||||
|
|
||||||
// buildActiveScheduleSessionRerunFunc 把主动调度定位器 / graph / preview 能力装成聊天入口可调用的 rerun 闭包。
|
// buildActiveScheduleSessionRerunFunc 把主动调度定位器 / graph / preview 能力装成聊天入口可调用的 rerun 闭包。
|
||||||
@@ -810,7 +826,7 @@ func buildAPIHandlers(
|
|||||||
taskService *service.TaskService,
|
taskService *service.TaskService,
|
||||||
taskClassService *service.TaskClassService,
|
taskClassService *service.TaskClassService,
|
||||||
courseService *service.CourseService,
|
courseService *service.CourseService,
|
||||||
scheduleService *service.ScheduleService,
|
scheduleClient ports.ScheduleCommandClient,
|
||||||
agentService *service.AgentService,
|
agentService *service.AgentService,
|
||||||
memoryModule *memory.Module,
|
memoryModule *memory.Module,
|
||||||
activeSchedulerClient ports.ActiveSchedulerCommandClient,
|
activeSchedulerClient ports.ActiveSchedulerCommandClient,
|
||||||
@@ -820,7 +836,7 @@ func buildAPIHandlers(
|
|||||||
TaskHandler: api.NewTaskHandler(taskService),
|
TaskHandler: api.NewTaskHandler(taskService),
|
||||||
TaskClassHandler: api.NewTaskClassHandler(taskClassService),
|
TaskClassHandler: api.NewTaskClassHandler(taskClassService),
|
||||||
CourseHandler: api.NewCourseHandler(courseService),
|
CourseHandler: api.NewCourseHandler(courseService),
|
||||||
ScheduleHandler: api.NewScheduleAPI(scheduleService),
|
ScheduleHandler: api.NewScheduleAPI(scheduleClient),
|
||||||
AgentHandler: api.NewAgentHandler(agentService),
|
AgentHandler: api.NewAgentHandler(agentService),
|
||||||
MemoryHandler: api.NewMemoryHandler(memoryModule),
|
MemoryHandler: api.NewMemoryHandler(memoryModule),
|
||||||
ActiveSchedule: api.NewActiveScheduleAPI(activeSchedulerClient),
|
ActiveSchedule: api.NewActiveScheduleAPI(activeSchedulerClient),
|
||||||
|
|||||||
@@ -59,6 +59,14 @@ notification:
|
|||||||
retryScanEvery: 1m
|
retryScanEvery: 1m
|
||||||
retryBatchSize: 50
|
retryBatchSize: 50
|
||||||
|
|
||||||
|
# 日程服务配置。
|
||||||
|
schedule:
|
||||||
|
rpc:
|
||||||
|
listenOn: "0.0.0.0:9084"
|
||||||
|
endpoints:
|
||||||
|
- "127.0.0.1:9084"
|
||||||
|
timeout: 6s
|
||||||
|
|
||||||
# 主动调度服务配置。
|
# 主动调度服务配置。
|
||||||
activeScheduler:
|
activeScheduler:
|
||||||
rpc:
|
rpc:
|
||||||
|
|||||||
@@ -6,19 +6,21 @@ import (
|
|||||||
"strconv"
|
"strconv"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/LoveLosita/smartflow/backend/model"
|
|
||||||
"github.com/LoveLosita/smartflow/backend/respond"
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
"github.com/LoveLosita/smartflow/backend/service"
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||||
"github.com/gin-gonic/gin"
|
"github.com/gin-gonic/gin"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
const scheduleRequestTimeout = 6 * time.Second
|
||||||
|
|
||||||
type ScheduleAPI struct {
|
type ScheduleAPI struct {
|
||||||
scheduleService *service.ScheduleService
|
scheduleClient ports.ScheduleCommandClient
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewScheduleAPI(scheduleService *service.ScheduleService) *ScheduleAPI {
|
func NewScheduleAPI(scheduleClient ports.ScheduleCommandClient) *ScheduleAPI {
|
||||||
return &ScheduleAPI{
|
return &ScheduleAPI{
|
||||||
scheduleService: scheduleService,
|
scheduleClient: scheduleClient,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -26,9 +28,9 @@ func (s *ScheduleAPI) GetUserTodaySchedule(c *gin.Context) {
|
|||||||
// 1. 从请求上下文中获取用户ID
|
// 1. 从请求上下文中获取用户ID
|
||||||
userID := c.GetInt("user_id")
|
userID := c.GetInt("user_id")
|
||||||
//2.调用服务层方法获取用户当天的日程安排
|
//2.调用服务层方法获取用户当天的日程安排
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
todaySchedules, err := s.scheduleService.GetUserTodaySchedule(ctx, userID)
|
todaySchedules, err := s.scheduleClient.GetUserTodaySchedule(ctx, userID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -47,9 +49,9 @@ func (s *ScheduleAPI) GetUserWeeklySchedule(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
//3.调用服务层方法获取用户当周的日程安排
|
//3.调用服务层方法获取用户当周的日程安排
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
weeklySchedules, err := s.scheduleService.GetUserWeeklySchedule(ctx, userID, week)
|
weeklySchedules, err := s.scheduleClient.GetUserWeeklySchedule(ctx, userID, week)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -62,15 +64,18 @@ func (s *ScheduleAPI) DeleteScheduleEvent(c *gin.Context) {
|
|||||||
// 1. 从请求上下文中获取用户ID
|
// 1. 从请求上下文中获取用户ID
|
||||||
userID := c.GetInt("user_id")
|
userID := c.GetInt("user_id")
|
||||||
// 2. 从请求体中获取要删除的日程事件信息
|
// 2. 从请求体中获取要删除的日程事件信息
|
||||||
var req []model.UserDeleteScheduleEvent
|
var req []schedulecontracts.UserDeleteScheduleEvent
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
//3.调用服务层方法删除指定的日程事件
|
//3.调用服务层方法删除指定的日程事件
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
err := s.scheduleService.DeleteScheduleEvent(ctx, req, userID)
|
err := s.scheduleClient.DeleteScheduleEvent(ctx, schedulecontracts.DeleteScheduleEventsRequest{
|
||||||
|
UserID: userID,
|
||||||
|
Events: req,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -95,9 +100,13 @@ func (s *ScheduleAPI) GetUserRecentCompletedSchedules(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
//2.调用服务层方法获取用户最近完成的日程事件
|
//2.调用服务层方法获取用户最近完成的日程事件
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
completedSchedules, err := s.scheduleService.GetUserRecentCompletedSchedules(ctx, userID, intIndex, intLimit)
|
completedSchedules, err := s.scheduleClient.GetUserRecentCompletedSchedules(ctx, schedulecontracts.RecentCompletedRequest{
|
||||||
|
UserID: userID,
|
||||||
|
Index: intIndex,
|
||||||
|
Limit: intLimit,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -110,9 +119,9 @@ func (s *ScheduleAPI) GetUserOngoingSchedule(c *gin.Context) {
|
|||||||
// 1. 从请求上下文中获取用户ID
|
// 1. 从请求上下文中获取用户ID
|
||||||
userID := c.GetInt("user_id")
|
userID := c.GetInt("user_id")
|
||||||
//2.调用服务层方法获取用户正在进行的日程事件
|
//2.调用服务层方法获取用户正在进行的日程事件
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
ongoingSchedule, err := s.scheduleService.GetUserOngoingSchedule(ctx, userID)
|
ongoingSchedule, err := s.scheduleClient.GetUserOngoingSchedule(ctx, userID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -132,7 +141,12 @@ func (s *ScheduleAPI) UserRevocateTaskItemFromSchedule(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
//3.调用服务层方法撤销任务块的安排
|
//3.调用服务层方法撤销任务块的安排
|
||||||
err = s.scheduleService.RevocateUserTaskClassItem(context.Background(), userID, intEventID)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
|
defer cancel()
|
||||||
|
err = s.scheduleClient.RevokeTaskItemFromSchedule(ctx, schedulecontracts.RevokeTaskItemRequest{
|
||||||
|
UserID: userID,
|
||||||
|
EventID: intEventID,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -152,9 +166,12 @@ func (s *ScheduleAPI) SmartPlanning(c *gin.Context) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
//3.调用服务层方法进行智能规划
|
//3.调用服务层方法进行智能规划
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel() // 记得释放资源
|
defer cancel() // 记得释放资源
|
||||||
res, err := s.scheduleService.SmartPlanning(ctx, userID, intTaskClassID)
|
res, err := s.scheduleClient.SmartPlanning(ctx, schedulecontracts.SmartPlanningRequest{
|
||||||
|
UserID: userID,
|
||||||
|
TaskClassID: intTaskClassID,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
@@ -174,16 +191,21 @@ func (s *ScheduleAPI) SmartPlanningMulti(c *gin.Context) {
|
|||||||
userID := c.GetInt("user_id")
|
userID := c.GetInt("user_id")
|
||||||
|
|
||||||
// 2. 绑定多任务类请求体。
|
// 2. 绑定多任务类请求体。
|
||||||
var req model.UserSmartPlanningMultiRequest
|
var req struct {
|
||||||
|
TaskClassIDs []int `json:"task_class_ids" binding:"required,min=1,dive,min=1"`
|
||||||
|
}
|
||||||
if err := c.ShouldBindJSON(&req); err != nil {
|
if err := c.ShouldBindJSON(&req); err != nil {
|
||||||
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// 3. 调用服务层执行多任务类粗排。
|
// 3. 调用服务层执行多任务类粗排。
|
||||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 1*time.Second)
|
ctx, cancel := context.WithTimeout(c.Request.Context(), scheduleRequestTimeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
res, err := s.scheduleService.SmartPlanningMulti(ctx, userID, req.TaskClassIDs)
|
res, err := s.scheduleClient.SmartPlanningMulti(ctx, schedulecontracts.SmartPlanningMultiRequest{
|
||||||
|
UserID: userID,
|
||||||
|
TaskClassIDs: req.TaskClassIDs,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
respond.DealWithError(c, err)
|
respond.DealWithError(c, err)
|
||||||
return
|
return
|
||||||
|
|||||||
190
backend/gateway/client/schedule/client.go
Normal file
190
backend/gateway/client/schedule/client.go
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
package schedule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
schedulepb "github.com/LoveLosita/smartflow/backend/services/schedule/rpc/pb"
|
||||||
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
"github.com/zeromicro/go-zero/zrpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultEndpoint = "127.0.0.1:9084"
|
||||||
|
defaultTimeout = 6 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type ClientConfig struct {
|
||||||
|
Endpoints []string
|
||||||
|
Target string
|
||||||
|
Timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// Client 是 gateway 侧 schedule zrpc 的最小适配层。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 只负责跨进程 gRPC 调用和 JSON 透传,不碰 DAO、粗排算法或正式日程 apply 状态机;
|
||||||
|
// 2. HTTP 入参仍由 gateway/api 做基础绑定,业务校验交给 schedule 服务;
|
||||||
|
// 3. 复杂响应不在 gateway 重建模型,避免 DTO 复制扩散。
|
||||||
|
type Client struct {
|
||||||
|
rpc schedulepb.ScheduleClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewClient(cfg ClientConfig) (*Client, error) {
|
||||||
|
timeout := cfg.Timeout
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeout
|
||||||
|
}
|
||||||
|
endpoints := normalizeEndpoints(cfg.Endpoints)
|
||||||
|
target := strings.TrimSpace(cfg.Target)
|
||||||
|
if len(endpoints) == 0 && target == "" {
|
||||||
|
endpoints = []string{defaultEndpoint}
|
||||||
|
}
|
||||||
|
|
||||||
|
zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
|
||||||
|
Endpoints: endpoints,
|
||||||
|
Target: target,
|
||||||
|
NonBlock: true,
|
||||||
|
Timeout: int64(timeout / time.Millisecond),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
client := &Client{rpc: schedulepb.NewScheduleClient(zclient.Conn())}
|
||||||
|
if err := client.ping(timeout); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return client, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetUserTodaySchedule(ctx context.Context, userID int) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.GetToday(ctx, &schedulepb.UserRequest{UserId: int64(userID)})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetUserWeeklySchedule(ctx context.Context, userID int, week int) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.GetWeek(ctx, &schedulepb.WeekRequest{UserId: int64(userID), Week: int64(week)})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) DeleteScheduleEvent(ctx context.Context, req schedulecontracts.DeleteScheduleEventsRequest) error {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
eventsJSON, err := json.Marshal(req.Events)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = c.rpc.DeleteEvents(ctx, &schedulepb.DeleteEventsRequest{
|
||||||
|
UserId: int64(req.UserID),
|
||||||
|
EventsJson: eventsJSON,
|
||||||
|
})
|
||||||
|
return responseFromRPCError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetUserRecentCompletedSchedules(ctx context.Context, req schedulecontracts.RecentCompletedRequest) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.GetRecentCompleted(ctx, &schedulepb.RecentCompletedRequest{
|
||||||
|
UserId: int64(req.UserID),
|
||||||
|
Index: int64(req.Index),
|
||||||
|
Limit: int64(req.Limit),
|
||||||
|
})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetUserOngoingSchedule(ctx context.Context, userID int) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.GetCurrent(ctx, &schedulepb.UserRequest{UserId: int64(userID)})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) RevokeTaskItemFromSchedule(ctx context.Context, req schedulecontracts.RevokeTaskItemRequest) error {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err := c.rpc.RevokeTaskItem(ctx, &schedulepb.RevokeTaskItemRequest{
|
||||||
|
UserId: int64(req.UserID),
|
||||||
|
EventId: int64(req.EventID),
|
||||||
|
})
|
||||||
|
return responseFromRPCError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) SmartPlanning(ctx context.Context, req schedulecontracts.SmartPlanningRequest) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.SmartPlanning(ctx, &schedulepb.SmartPlanningRequest{
|
||||||
|
UserId: int64(req.UserID),
|
||||||
|
TaskClassId: int64(req.TaskClassID),
|
||||||
|
})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) SmartPlanningMulti(ctx context.Context, req schedulecontracts.SmartPlanningMultiRequest) (json.RawMessage, error) {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
taskClassIDs := make([]int64, 0, len(req.TaskClassIDs))
|
||||||
|
for _, id := range req.TaskClassIDs {
|
||||||
|
taskClassIDs = append(taskClassIDs, int64(id))
|
||||||
|
}
|
||||||
|
resp, err := c.rpc.SmartPlanningMulti(ctx, &schedulepb.SmartPlanningMultiRequest{
|
||||||
|
UserId: int64(req.UserID),
|
||||||
|
TaskClassIds: taskClassIDs,
|
||||||
|
})
|
||||||
|
return jsonFromResponse(resp, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) ensureReady() error {
|
||||||
|
if c == nil || c.rpc == nil {
|
||||||
|
return errors.New("schedule zrpc client is not initialized")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) ping(timeout time.Duration) error {
|
||||||
|
if err := c.ensureReady(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), timeout)
|
||||||
|
defer cancel()
|
||||||
|
_, err := c.rpc.Ping(ctx, &schedulepb.StatusResponse{})
|
||||||
|
return responseFromRPCError(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func jsonFromResponse(resp *schedulepb.JSONResponse, rpcErr error) (json.RawMessage, error) {
|
||||||
|
if rpcErr != nil {
|
||||||
|
return nil, responseFromRPCError(rpcErr)
|
||||||
|
}
|
||||||
|
if resp == nil {
|
||||||
|
return nil, errors.New("schedule zrpc service returned empty JSON response")
|
||||||
|
}
|
||||||
|
if len(resp.DataJson) == 0 {
|
||||||
|
return json.RawMessage("null"), nil
|
||||||
|
}
|
||||||
|
return json.RawMessage(resp.DataJson), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeEndpoints trims whitespace from every configured endpoint and
// drops entries that end up empty, preserving the original order.
func normalizeEndpoints(values []string) []string {
	out := make([]string, 0, len(values))
	for _, raw := range values {
		if cleaned := strings.TrimSpace(raw); cleaned != "" {
			out = append(out, cleaned)
		}
	}
	return out
}
|
||||||
92
backend/gateway/client/schedule/errors.go
Normal file
92
backend/gateway/client/schedule/errors.go
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
package schedule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// responseFromRPCError maps a schedule gRPC error back into the project's
// error types.
//
// Responsibility boundaries:
//  1. Used only at the gateway edge; never pushed down into the schedule
//     service implementation.
//  2. Business errors are restored to respond.Response wherever possible so
//     the API layer can keep reusing DealWithError.
//  3. Service-unavailable and unknown internal errors are wrapped as plain
//     errors, to avoid misreporting them as user-correctable input problems.
func responseFromRPCError(err error) error {
	if err == nil {
		return nil
	}

	st, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status at all — treat as a transport-level failure.
		return wrapRPCError(err)
	}
	if resp, ok := responseFromStatus(st); ok {
		// A business respond.Response could be recovered from ErrorInfo details.
		return resp
	}

	switch st.Code() {
	case codes.Internal, codes.Unknown, codes.Unavailable, codes.DeadlineExceeded, codes.DataLoss, codes.Unimplemented:
		// Server-side / infrastructure failures: wrap as a plain error.
		msg := strings.TrimSpace(st.Message())
		if msg == "" {
			msg = "schedule zrpc service internal error"
		}
		return wrapRPCError(errors.New(msg))
	}

	// Remaining codes are treated as request rejections the caller can act on.
	msg := strings.TrimSpace(st.Message())
	if msg == "" {
		msg = "schedule zrpc service rejected request"
	}
	return respond.Response{Status: grpcCodeToRespondStatus(st.Code()), Info: msg}
}
|
||||||
|
|
||||||
|
// responseFromStatus tries to restore a respond.Response from the first
// ErrorInfo detail attached to a gRPC status. The boolean reports whether a
// business response could be recovered.
//
// Field precedence: Status comes from ErrorInfo.Reason, falling back to a
// value derived from the gRPC code; Info comes from the status message, then
// from the detail's "info" metadata, then from the Status value itself.
func responseFromStatus(st *status.Status) (respond.Response, bool) {
	if st == nil {
		return respond.Response{}, false
	}
	for _, detail := range st.Details() {
		info, ok := detail.(*errdetails.ErrorInfo)
		if !ok {
			continue
		}
		statusValue := strings.TrimSpace(info.Reason)
		if statusValue == "" {
			statusValue = grpcCodeToRespondStatus(st.Code())
		}
		message := strings.TrimSpace(st.Message())
		if message == "" && info.Metadata != nil {
			message = strings.TrimSpace(info.Metadata["info"])
		}
		if message == "" {
			message = statusValue
		}
		// Only the first ErrorInfo detail is considered.
		return respond.Response{Status: statusValue, Info: message}, true
	}
	return respond.Response{}, false
}
|
||||||
|
|
||||||
|
func grpcCodeToRespondStatus(code codes.Code) string {
|
||||||
|
switch code {
|
||||||
|
case codes.Unauthenticated:
|
||||||
|
return respond.ErrUnauthorized.Status
|
||||||
|
case codes.InvalidArgument:
|
||||||
|
return respond.MissingParam.Status
|
||||||
|
case codes.Internal, codes.Unknown, codes.DataLoss:
|
||||||
|
return "500"
|
||||||
|
default:
|
||||||
|
return "400"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func wrapRPCError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return fmt.Errorf("调用 schedule zrpc 服务失败: %w", err)
|
||||||
|
}
|
||||||
327
backend/services/active_scheduler/core/adapters/schedule_rpc.go
Normal file
327
backend/services/active_scheduler/core/adapters/schedule_rpc.go
Normal file
@@ -0,0 +1,327 @@
|
|||||||
|
package adapters
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||||
|
activeports "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||||
|
schedulepb "github.com/LoveLosita/smartflow/backend/services/schedule/rpc/pb"
|
||||||
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
"github.com/zeromicro/go-zero/zrpc"
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
	// defaultScheduleRPCEndpoint is used when neither Endpoints nor Target is
	// configured for the schedule zrpc client.
	defaultScheduleRPCEndpoint = "127.0.0.1:9084"
	// defaultScheduleRPCTimeout bounds each RPC call and the startup health
	// check when no timeout is configured.
	defaultScheduleRPCTimeout = 6 * time.Second
	// scheduleApplyErrorDomain identifies ErrorInfo details that carry
	// schedule apply business errors and should be unwrapped into ApplyError.
	scheduleApplyErrorDomain = "smartflow.schedule.apply"
)
|
||||||
|
|
||||||
|
// ScheduleRPCConfig configures how active-scheduler dials the schedule zrpc
// service.
type ScheduleRPCConfig struct {
	// Endpoints are direct service addresses; entries are trimmed and empty
	// ones dropped before use.
	Endpoints []string
	// Target is a go-zero resolver target, passed through alongside
	// Endpoints to the zrpc client configuration.
	Target string
	// Timeout bounds each RPC call and the startup ping; non-positive values
	// fall back to defaultScheduleRPCTimeout.
	Timeout time.Duration
}
|
||||||
|
|
||||||
|
// ScheduleRPCAdapter is the RPC adapter active-scheduler uses to reach the
// schedule service.
//
// Responsibility boundaries:
//  1. Only converts between active-scheduler internal port DTOs and
//     shared/contracts/schedule;
//  2. Reads no database and implements no schedule write state machine;
//  3. Lets active-scheduler stop accessing schedule_events / schedules
//     directly.
type ScheduleRPCAdapter struct {
	rpc schedulepb.ScheduleClient
}
|
||||||
|
|
||||||
|
// NewScheduleRPCAdapter builds a zrpc client for the schedule service and
// verifies connectivity with a startup Ping, so active-scheduler fails fast
// when schedule is unreachable.
//
// Config resolution: a non-positive Timeout falls back to the default;
// endpoints are trimmed, and when neither Endpoints nor Target is set the
// local default endpoint is used.
func NewScheduleRPCAdapter(cfg ScheduleRPCConfig) (*ScheduleRPCAdapter, error) {
	timeout := cfg.Timeout
	if timeout <= 0 {
		timeout = defaultScheduleRPCTimeout
	}
	endpoints := normalizeScheduleRPCEndpoints(cfg.Endpoints)
	target := strings.TrimSpace(cfg.Target)
	if len(endpoints) == 0 && target == "" {
		endpoints = []string{defaultScheduleRPCEndpoint}
	}
	// NonBlock lets the dial return immediately; the explicit ping below is
	// what actually validates service availability at startup.
	zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
		Endpoints: endpoints,
		Target:    target,
		NonBlock:  true,
		Timeout:   int64(timeout / time.Millisecond), // go-zero expects milliseconds
	})
	if err != nil {
		return nil, err
	}
	adapter := &ScheduleRPCAdapter{rpc: schedulepb.NewScheduleClient(zclient.Conn())}
	if err := adapter.ping(timeout); err != nil {
		return nil, err
	}
	return adapter, nil
}
|
||||||
|
|
||||||
|
func ReadersWithScheduleRPC(taskReader activeports.TaskReader, scheduleReader *ScheduleRPCAdapter) activeports.Readers {
|
||||||
|
return activeports.Readers{
|
||||||
|
TaskReader: taskReader,
|
||||||
|
ScheduleReader: scheduleReader,
|
||||||
|
FeedbackReader: scheduleReader,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetScheduleFactsByWindow fetches schedule facts for a time window via the
// schedule RPC, translating between the active-scheduler port DTOs and the
// shared schedule contracts on both sides of the call.
func (a *ScheduleRPCAdapter) GetScheduleFactsByWindow(ctx context.Context, req activeports.ScheduleWindowRequest) (activeports.ScheduleWindowFacts, error) {
	if err := a.ensureReady(); err != nil {
		return activeports.ScheduleWindowFacts{}, err
	}
	// Requests and responses travel as JSON payloads inside generic proto
	// envelopes; the shared contract types define the wire schema.
	payload, err := json.Marshal(toScheduleWindowContract(req))
	if err != nil {
		return activeports.ScheduleWindowFacts{}, err
	}
	resp, err := a.rpc.GetScheduleFactsByWindow(ctx, &schedulepb.JSONRequest{PayloadJson: payload})
	if err != nil {
		return activeports.ScheduleWindowFacts{}, scheduleRPCError(err)
	}
	var facts schedulecontracts.ScheduleWindowFacts
	if err := json.Unmarshal(jsonBytes(resp), &facts); err != nil {
		return activeports.ScheduleWindowFacts{}, err
	}
	return scheduleFactsToActive(facts), nil
}
|
||||||
|
|
||||||
|
// GetFeedbackSignal looks up a user feedback fact via the schedule RPC.
// The boolean mirrors the contract's Found flag: false means no matching
// feedback exists (not an error).
func (a *ScheduleRPCAdapter) GetFeedbackSignal(ctx context.Context, req activeports.FeedbackRequest) (activeports.FeedbackFact, bool, error) {
	if err := a.ensureReady(); err != nil {
		return activeports.FeedbackFact{}, false, err
	}
	payload, err := json.Marshal(schedulecontracts.FeedbackRequest{
		UserID:         req.UserID,
		FeedbackID:     req.FeedbackID,
		IdempotencyKey: req.IdempotencyKey,
		TargetType:     req.TargetType,
		TargetID:       req.TargetID,
	})
	if err != nil {
		return activeports.FeedbackFact{}, false, err
	}
	resp, err := a.rpc.GetFeedbackSignal(ctx, &schedulepb.JSONRequest{PayloadJson: payload})
	if err != nil {
		return activeports.FeedbackFact{}, false, scheduleRPCError(err)
	}
	var contractResp schedulecontracts.FeedbackResponse
	if err := json.Unmarshal(jsonBytes(resp), &contractResp); err != nil {
		return activeports.FeedbackFact{}, false, err
	}
	return feedbackFactToActive(contractResp.Feedback), contractResp.Found, nil
}
|
||||||
|
|
||||||
|
// ApplyActiveScheduleChanges forwards a confirm-apply request to the schedule
// RPC and maps the result back into the active-scheduler applyadapter shape.
// Business apply errors carried in gRPC ErrorInfo details are restored by
// scheduleRPCError as *applyadapter.ApplyError so callers can branch on Code.
func (a *ScheduleRPCAdapter) ApplyActiveScheduleChanges(ctx context.Context, req activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error) {
	if err := a.ensureReady(); err != nil {
		return activeapplyadapter.ApplyActiveScheduleResult{}, err
	}
	payload, err := json.Marshal(toScheduleApplyContract(req))
	if err != nil {
		return activeapplyadapter.ApplyActiveScheduleResult{}, err
	}
	resp, err := a.rpc.ApplyActiveScheduleChanges(ctx, &schedulepb.JSONRequest{PayloadJson: payload})
	if err != nil {
		return activeapplyadapter.ApplyActiveScheduleResult{}, scheduleRPCError(err)
	}
	var result schedulecontracts.ApplyActiveScheduleResult
	if err := json.Unmarshal(jsonBytes(resp), &result); err != nil {
		return activeapplyadapter.ApplyActiveScheduleResult{}, err
	}
	return activeapplyadapter.ApplyActiveScheduleResult{
		ApplyID:            result.ApplyID,
		AppliedEventIDs:    result.AppliedEventIDs,
		AppliedScheduleIDs: result.AppliedScheduleIDs,
	}, nil
}
|
||||||
|
|
||||||
|
func (a *ScheduleRPCAdapter) ensureReady() error {
|
||||||
|
if a == nil || a.rpc == nil {
|
||||||
|
return errors.New("schedule rpc adapter 未初始化")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ping performs a one-shot startup health check against the schedule zrpc
// service, bounded by timeout, so adapter construction fails fast when the
// service is unreachable.
func (a *ScheduleRPCAdapter) ping(timeout time.Duration) error {
	if err := a.ensureReady(); err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	// NOTE(review): the generated Ping takes a StatusResponse as its request
	// message — presumably intentional in the proto; confirm against schedule.proto.
	_, err := a.rpc.Ping(ctx, &schedulepb.StatusResponse{})
	return scheduleRPCError(err)
}
|
||||||
|
|
||||||
|
// scheduleRPCError translates a gRPC error from the schedule service back
// into active-scheduler error types.
//
//  1. ErrorInfo details in the schedule apply domain are restored as
//     *applyadapter.ApplyError so confirm flows can branch on Code;
//  2. transport-level failures (Internal / Unavailable / DeadlineExceeded)
//     are wrapped with a call-site context message;
//  3. everything else is returned unchanged.
func scheduleRPCError(err error) error {
	if err == nil {
		return nil
	}
	st, ok := status.FromError(err)
	if !ok {
		return err
	}
	for _, detail := range st.Details() {
		info, ok := detail.(*errdetails.ErrorInfo)
		// Only details from the schedule apply domain are business errors here.
		if !ok || info.Domain != scheduleApplyErrorDomain {
			continue
		}
		message := strings.TrimSpace(st.Message())
		if message == "" && info.Metadata != nil {
			// Fall back to the "info" metadata entry for the human message.
			message = strings.TrimSpace(info.Metadata["info"])
		}
		return &activeapplyadapter.ApplyError{
			Code:    strings.TrimSpace(info.Reason),
			Message: message,
			Cause:   err,
		}
	}
	if st.Code() == codes.Internal || st.Code() == codes.Unavailable || st.Code() == codes.DeadlineExceeded {
		return fmt.Errorf("调用 schedule zrpc 服务失败: %w", err)
	}
	return err
}
|
||||||
|
|
||||||
|
// toScheduleWindowContract maps the active-scheduler window request onto the
// shared schedule contract, field for field.
func toScheduleWindowContract(req activeports.ScheduleWindowRequest) schedulecontracts.ScheduleWindowRequest {
	return schedulecontracts.ScheduleWindowRequest{
		UserID:      req.UserID,
		TargetType:  req.TargetType,
		TargetID:    req.TargetID,
		WindowStart: req.WindowStart,
		WindowEnd:   req.WindowEnd,
		Now:         req.Now,
	}
}
|
||||||
|
|
||||||
|
// scheduleFactsToActive converts contract-level window facts into the
// active-scheduler port representation, converting nested events and slots.
func scheduleFactsToActive(facts schedulecontracts.ScheduleWindowFacts) activeports.ScheduleWindowFacts {
	events := make([]activeports.ScheduleEventFact, 0, len(facts.Events))
	for _, event := range facts.Events {
		events = append(events, scheduleEventFactToActive(event))
	}
	return activeports.ScheduleWindowFacts{
		Events:                 events,
		OccupiedSlots:          scheduleSlotsToActive(facts.OccupiedSlots),
		FreeSlots:              scheduleSlotsToActive(facts.FreeSlots),
		NextDynamicTask:        scheduleEventFactPtrToActive(facts.NextDynamicTask),
		TargetAlreadyScheduled: facts.TargetAlreadyScheduled,
	}
}
|
||||||
|
|
||||||
|
// scheduleEventFactToActive maps a single contract-level schedule event fact
// onto the active-scheduler port type, field for field.
func scheduleEventFactToActive(event schedulecontracts.ScheduleEventFact) activeports.ScheduleEventFact {
	return activeports.ScheduleEventFact{
		ID:             event.ID,
		UserID:         event.UserID,
		Title:          event.Title,
		SourceType:     event.SourceType,
		RelID:          event.RelID,
		IsDynamicTask:  event.IsDynamicTask,
		IsCompleted:    event.IsCompleted,
		Slots:          scheduleSlotsToActive(event.Slots),
		TaskClassID:    event.TaskClassID,
		TaskItemID:     event.TaskItemID,
		CanBeShortened: event.CanBeShortened,
	}
}
|
||||||
|
|
||||||
|
func scheduleEventFactPtrToActive(event *schedulecontracts.ScheduleEventFact) *activeports.ScheduleEventFact {
|
||||||
|
if event == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
converted := scheduleEventFactToActive(*event)
|
||||||
|
return &converted
|
||||||
|
}
|
||||||
|
|
||||||
|
// scheduleSlotsToActive copies contract-level slots into the active-scheduler
// port slot type, preserving order.
func scheduleSlotsToActive(slots []schedulecontracts.Slot) []activeports.Slot {
	out := make([]activeports.Slot, 0, len(slots))
	for _, slot := range slots {
		out = append(out, activeports.Slot{
			Week:      slot.Week,
			DayOfWeek: slot.DayOfWeek,
			Section:   slot.Section,
			StartAt:   slot.StartAt,
			EndAt:     slot.EndAt,
		})
	}
	return out
}
|
||||||
|
|
||||||
|
// feedbackFactToActive maps a contract-level feedback fact onto the
// active-scheduler port type, field for field.
func feedbackFactToActive(feedback schedulecontracts.FeedbackFact) activeports.FeedbackFact {
	return activeports.FeedbackFact{
		FeedbackID:       feedback.FeedbackID,
		Text:             feedback.Text,
		TargetKnown:      feedback.TargetKnown,
		TargetEventID:    feedback.TargetEventID,
		TargetTaskItemID: feedback.TargetTaskItemID,
		TargetTitle:      feedback.TargetTitle,
		SubmittedAt:      feedback.SubmittedAt,
	}
}
|
||||||
|
|
||||||
|
// toScheduleApplyContract maps the active-scheduler apply request into the
// shared schedule contract, rebuilding the changes slice and deep-copying
// each change's metadata map so the RPC payload does not alias caller state.
func toScheduleApplyContract(req activeapplyadapter.ApplyActiveScheduleRequest) schedulecontracts.ApplyActiveScheduleRequest {
	changes := make([]schedulecontracts.ApplyChange, 0, len(req.Changes))
	for _, change := range req.Changes {
		changes = append(changes, schedulecontracts.ApplyChange{
			ChangeID:         change.ChangeID,
			ChangeType:       change.ChangeType,
			TargetType:       change.TargetType,
			TargetID:         change.TargetID,
			ToSlot:           toScheduleSlotSpan(change.ToSlot),
			DurationSections: change.DurationSections,
			Metadata:         cloneStringMap(change.Metadata),
		})
	}
	return schedulecontracts.ApplyActiveScheduleRequest{
		PreviewID:   req.PreviewID,
		ApplyID:     req.ApplyID,
		UserID:      req.UserID,
		CandidateID: req.CandidateID,
		Changes:     changes,
		RequestedAt: req.RequestedAt,
		TraceID:     req.TraceID,
	}
}
|
||||||
|
|
||||||
|
// toScheduleSlotSpan converts an optional slot span into the contract type.
// Only Week / DayOfWeek / Section are carried over for the endpoints; nil
// passes through as nil.
func toScheduleSlotSpan(span *activeapplyadapter.SlotSpan) *schedulecontracts.SlotSpan {
	if span == nil {
		return nil
	}
	return &schedulecontracts.SlotSpan{
		Start:            schedulecontracts.Slot{Week: span.Start.Week, DayOfWeek: span.Start.DayOfWeek, Section: span.Start.Section},
		End:              schedulecontracts.Slot{Week: span.End.Week, DayOfWeek: span.End.DayOfWeek, Section: span.End.Section},
		DurationSections: span.DurationSections,
	}
}
|
||||||
|
|
||||||
|
func jsonBytes(resp *schedulepb.JSONResponse) []byte {
|
||||||
|
if resp == nil || len(resp.DataJson) == 0 {
|
||||||
|
return []byte("null")
|
||||||
|
}
|
||||||
|
return resp.DataJson
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeScheduleRPCEndpoints trims whitespace from every configured
// endpoint and drops entries that end up empty, preserving order.
func normalizeScheduleRPCEndpoints(values []string) []string {
	cleaned := make([]string, 0, len(values))
	for _, candidate := range values {
		candidate = strings.TrimSpace(candidate)
		if candidate == "" {
			continue
		}
		cleaned = append(cleaned, candidate)
	}
	return cleaned
}
|
||||||
|
|
||||||
|
// cloneStringMap returns an independent copy of input, or nil when input is
// empty, so the result can be mutated without aliasing the caller's map.
func cloneStringMap(input map[string]string) map[string]string {
	if len(input) == 0 {
		return nil
	}
	cloned := make(map[string]string, len(input))
	for key, value := range input {
		cloned[key] = value
	}
	return cloned
}
|
||||||
@@ -24,11 +24,15 @@ type PreviewConfirmService struct {
|
|||||||
dryRun *DryRunService
|
dryRun *DryRunService
|
||||||
preview *activepreview.Service
|
preview *activepreview.Service
|
||||||
activeDAO *dao.ActiveScheduleDAO
|
activeDAO *dao.ActiveScheduleDAO
|
||||||
applyAdapter *applyadapter.GormApplyAdapter
|
applyAdapter scheduleApplyAdapter
|
||||||
clock func() time.Time
|
clock func() time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
func NewPreviewConfirmService(dryRun *DryRunService, previewService *activepreview.Service, activeDAO *dao.ActiveScheduleDAO, applyAdapter *applyadapter.GormApplyAdapter) (*PreviewConfirmService, error) {
|
type scheduleApplyAdapter interface {
|
||||||
|
ApplyActiveScheduleChanges(ctx context.Context, req applyadapter.ApplyActiveScheduleRequest) (applyadapter.ApplyActiveScheduleResult, error)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPreviewConfirmService(dryRun *DryRunService, previewService *activepreview.Service, activeDAO *dao.ActiveScheduleDAO, applyAdapter scheduleApplyAdapter) (*PreviewConfirmService, error) {
|
||||||
if dryRun == nil {
|
if dryRun == nil {
|
||||||
return nil, errors.New("dry-run service 不能为空")
|
return nil, errors.New("dry-run service 不能为空")
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -75,7 +75,7 @@ type runtimeDependencyTable struct {
|
|||||||
//
|
//
|
||||||
// 职责边界:
|
// 职责边界:
|
||||||
// 1. 只检查表是否存在,不 AutoMigrate、不补列、不修改任何跨域表;
|
// 1. 只检查表是否存在,不 AutoMigrate、不补列、不修改任何跨域表;
|
||||||
// 2. 把 active-scheduler 运行时仍然需要的 task / schedule / agent / notification outbox 边界显式化;
|
// 2. 把 active-scheduler 运行时仍然需要的 task / agent / notification outbox 边界显式化;
|
||||||
// 3. 若部署顺序、库权限或表结构归属不满足,启动阶段直接 fail fast,避免第一次 trigger 才反复重试。
|
// 3. 若部署顺序、库权限或表结构归属不满足,启动阶段直接 fail fast,避免第一次 trigger 才反复重试。
|
||||||
func ensureRuntimeDependencyTables(db *gorm.DB) error {
|
func ensureRuntimeDependencyTables(db *gorm.DB) error {
|
||||||
if db == nil {
|
if db == nil {
|
||||||
@@ -110,7 +110,7 @@ func ensureTableExists(db *gorm.DB, table runtimeDependencyTable) error {
|
|||||||
// 说明:
|
// 说明:
|
||||||
// 1. active-scheduler 自有表在 OpenDBFromConfig 内迁移,这里只放跨域依赖;
|
// 1. active-scheduler 自有表在 OpenDBFromConfig 内迁移,这里只放跨域依赖;
|
||||||
// 2. notification outbox 表名来自 service catalog,避免和 outbox 多表路由配置漂移;
|
// 2. notification outbox 表名来自 service catalog,避免和 outbox 多表路由配置漂移;
|
||||||
// 3. 后续切到 task/schedule/agent/notification RPC 或 read model 后,应从这里移除对应表依赖。
|
// 3. schedule 读写已切到 schedule RPC;后续切到 task/agent/notification RPC 或 read model 后,应继续移除对应表依赖。
|
||||||
func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
|
func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
|
||||||
notificationOutboxTable := "notification_outbox_messages"
|
notificationOutboxTable := "notification_outbox_messages"
|
||||||
if cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceNotification); ok && cfg.TableName != "" {
|
if cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceNotification); ok && cfg.TableName != "" {
|
||||||
@@ -118,11 +118,7 @@ func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return []runtimeDependencyTable{
|
return []runtimeDependencyTable{
|
||||||
{Name: "tasks", Reason: "dry-run 读取 task_pool 事实,confirm 时锁定 task_pool 目标"},
|
{Name: "tasks", Reason: "迁移期 dry-run / due job scanner 仍读取 task_pool 事实,下一轮切 task RPC 后移除"},
|
||||||
{Name: "schedule_events", Reason: "dry-run 读取日程事实,confirm 时写入正式日程事件"},
|
|
||||||
{Name: "schedules", Reason: "dry-run 读取节次占用,confirm 时写入正式节次"},
|
|
||||||
{Name: "task_classes", Reason: "confirm create_makeup 时校验 task_item 归属"},
|
|
||||||
{Name: "task_items", Reason: "confirm create_makeup 时锁定 task_item 目标"},
|
|
||||||
{Name: "agent_chats", Reason: "trigger 生成 preview 后预建主动调度会话"},
|
{Name: "agent_chats", Reason: "trigger 生成 preview 后预建主动调度会话"},
|
||||||
{Name: "chat_histories", Reason: "trigger 生成 preview 后写入会话首屏消息"},
|
{Name: "chat_histories", Reason: "trigger 生成 preview 后写入会话首屏消息"},
|
||||||
{Name: "agent_timeline_events", Reason: "trigger 生成 preview 后写入主动调度时间线卡片"},
|
{Name: "agent_timeline_events", Reason: "trigger 生成 preview 后写入主动调度时间线卡片"},
|
||||||
|
|||||||
@@ -14,7 +14,7 @@ import (
|
|||||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||||
activejob "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/job"
|
activejob "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/job"
|
||||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||||
@@ -39,6 +39,7 @@ type Options struct {
|
|||||||
JobScanEvery time.Duration
|
JobScanEvery time.Duration
|
||||||
JobScanLimit int
|
JobScanLimit int
|
||||||
KafkaConfig kafkabus.Config
|
KafkaConfig kafkabus.Config
|
||||||
|
ScheduleRPC activeadapters.ScheduleRPCConfig
|
||||||
}
|
}
|
||||||
|
|
||||||
// Service 是 active-scheduler 独立进程内的服务门面。
|
// Service 是 active-scheduler 独立进程内的服务门面。
|
||||||
@@ -69,12 +70,16 @@ func New(db *gorm.DB, llmService *llmservice.Service, opts Options) (*Service, e
|
|||||||
|
|
||||||
activeDAO := rootdao.NewActiveScheduleDAO(db)
|
activeDAO := rootdao.NewActiveScheduleDAO(db)
|
||||||
activeReaders := activeadapters.NewGormReaders(db)
|
activeReaders := activeadapters.NewGormReaders(db)
|
||||||
readers := activeadapters.ReadersFromGorm(activeReaders)
|
scheduleRPCAdapter, err := activeadapters.NewScheduleRPCAdapter(opts.ScheduleRPC)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("initialize schedule rpc adapter failed: %w", err)
|
||||||
|
}
|
||||||
|
readers := activeadapters.ReadersWithScheduleRPC(activeReaders, scheduleRPCAdapter)
|
||||||
dryRun, err := activesvc.NewDryRunService(readers)
|
dryRun, err := activesvc.NewDryRunService(readers)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
previewConfirm, err := buildPreviewConfirmService(db, activeDAO, dryRun)
|
previewConfirm, err := buildPreviewConfirmService(activeDAO, dryRun, scheduleRPCAdapter)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -259,12 +264,14 @@ func (s *Service) ConfirmPreview(ctx context.Context, req contracts.ConfirmPrevi
|
|||||||
return marshalResponseJSON(result)
|
return marshalResponseJSON(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildPreviewConfirmService(db *gorm.DB, activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService) (*activesvc.PreviewConfirmService, error) {
|
func buildPreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||||
|
ApplyActiveScheduleChanges(context.Context, activeapplyadapter.ApplyActiveScheduleRequest) (activeapplyadapter.ApplyActiveScheduleResult, error)
|
||||||
|
}) (*activesvc.PreviewConfirmService, error) {
|
||||||
previewService, err := activepreview.NewService(activeDAO)
|
previewService, err := activepreview.NewService(activeDAO)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, applyadapter.NewGormApplyAdapter(db))
|
return activesvc.NewPreviewConfirmService(dryRun, previewService, activeDAO, scheduleApplyAdapter)
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildGraphRunner(dryRun *activesvc.DryRunService, llmService *llmservice.Service) (*activegraph.Runner, error) {
|
func buildGraphRunner(dryRun *activesvc.DryRunService, llmService *llmservice.Service) (*activegraph.Runner, error) {
|
||||||
|
|||||||
496
backend/services/schedule/core/applyadapter/adapter.go
Normal file
496
backend/services/schedule/core/applyadapter/adapter.go
Normal file
@@ -0,0 +1,496 @@
|
|||||||
|
package applyadapter
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/conv"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
"gorm.io/gorm/clause"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GormApplyAdapter writes confirmed active-scheduling changes into the
// authoritative schedule tables.
//
// Responsibility boundaries:
//  1. Writes only schedule_events / schedules, performing target
//     re-validation and conflict re-checks inside the transaction;
//  2. Never writes back active_schedule_previews, never publishes outbox
//     messages, never calls API / service / task layers;
//  3. Never creates task_items and never updates task / task_items state;
//     whether a task_pool entry is already scheduled is determined by
//     querying schedule_events.
//
// Migration notes:
//  1. This file was moved verbatim from the active-scheduler applyadapter so
//     ownership of authoritative schedule writes returns to schedule first;
//  2. The old adapter under active-scheduler is kept this round for rollback
//     and historical build compatibility, to be deleted once the traffic
//     switch is stable;
//  3. No shared layer is extracted for now: this logic is schedule's
//     write-model state machine, and placing it in shared would pollute the
//     cross-process contract layer.
type GormApplyAdapter struct {
	db *gorm.DB
}
||||||
|
|
||||||
|
func NewGormApplyAdapter(db *gorm.DB) *GormApplyAdapter {
|
||||||
|
return &GormApplyAdapter{db: db}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyActiveScheduleChanges applies active-scheduling changes inside a
// single database transaction.
//
// Transaction semantics:
//  1. First normalize every change's sections and reject requests that
//     conflict with themselves;
//  2. Inside the transaction, lock the target facts and re-query schedules
//     occupancy; any conflict returns slot_conflict directly;
//  3. Commit only after all events and schedules rows insert successfully;
//     any error rolls back, so there are no partial writes.
//
// Inputs / outputs:
//  1. req.UserID / req.PreviewID / req.Changes must be valid;
//  2. the returned AppliedEventIDs are the newly created schedule_events.id;
//  3. when the error is an *ApplyError, callers can branch on its Code.
func (a *GormApplyAdapter) ApplyActiveScheduleChanges(ctx context.Context, req ApplyActiveScheduleRequest) (ApplyActiveScheduleResult, error) {
	if a == nil || a.db == nil {
		return ApplyActiveScheduleResult{}, newApplyError(ErrorCodeInvalidRequest, "主动调度 apply adapter 未初始化", nil)
	}
	normalized, err := normalizeRequest(req)
	if err != nil {
		return ApplyActiveScheduleResult{}, err
	}

	result := ApplyActiveScheduleResult{ApplyID: req.ApplyID}
	err = a.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		appliedEventIDs := make([]int, 0, len(normalized))
		appliedScheduleIDs := make([]int, 0)
		for _, change := range normalized {
			var eventIDs []int
			var scheduleIDs []int
			var applyErr error
			// Dispatch on the change shape; unknown types abort the whole
			// transaction so no partial batch is ever committed.
			switch {
			case isAddTaskPoolChange(change):
				eventIDs, scheduleIDs, applyErr = a.applyTaskPoolChange(ctx, tx, req, change)
			case isCreateMakeupChange(change):
				eventIDs, scheduleIDs, applyErr = a.applyMakeupChange(ctx, tx, req, change)
			default:
				applyErr = newApplyError(ErrorCodeUnsupportedChangeType, fmt.Sprintf("不支持的主动调度变更类型:%s", change.ChangeType), nil)
			}
			if applyErr != nil {
				return applyErr
			}
			appliedEventIDs = append(appliedEventIDs, eventIDs...)
			appliedScheduleIDs = append(appliedScheduleIDs, scheduleIDs...)
		}
		result.AppliedEventIDs = appliedEventIDs
		result.AppliedScheduleIDs = appliedScheduleIDs
		return nil
	})
	if err != nil {
		// Translate raw DB errors into classified apply errors for callers.
		return ApplyActiveScheduleResult{}, classifyDBError(err)
	}
	return result, nil
}
|
||||||
|
|
||||||
|
// applyTaskPoolChange inserts a schedule event (plus section rows) for a
// task_pool target inside the apply transaction. It locks the task, rejects
// completed or already-scheduled tasks, and re-checks slot conflicts before
// inserting.
func (a *GormApplyAdapter) applyTaskPoolChange(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange) ([]int, []int, error) {
	targetID := change.TargetID
	if change.TargetType != "" && change.TargetType != TargetTypeTaskPool {
		return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "add_task_pool_to_schedule 只能写入 task_pool 目标", nil)
	}

	// Lock the same task_pool task to serialize the "already scheduled?"
	// check, so concurrent confirms cannot write duplicate task blocks.
	task, err := lockTaskPool(ctx, tx, req.UserID, targetID)
	if err != nil {
		return nil, nil, err
	}
	if task.IsCompleted {
		return nil, nil, newApplyError(ErrorCodeTargetCompleted, "task_pool 任务已完成,不能再加入日程", nil)
	}
	if err := ensureTaskPoolNotScheduled(ctx, tx, req.UserID, task.ID); err != nil {
		return nil, nil, err
	}
	if err := ensureSlotsFree(ctx, tx, req.UserID, change); err != nil {
		return nil, nil, err
	}

	// Fall back to a generated name when the task title is blank.
	eventName := strings.TrimSpace(task.Title)
	if eventName == "" {
		eventName = fmt.Sprintf("任务 %d", task.ID)
	}
	relID := task.ID
	return insertTaskEventWithSchedules(ctx, tx, req, change, eventPayload{
		Name:           eventName,
		TaskSourceType: TaskSourceTypeTaskPool,
		RelID:          relID,
		Sections:       change.Sections,
	})
}
|
||||||
|
|
||||||
|
// applyMakeupChange inserts a makeup event for an existing schedule target
// inside the apply transaction. The makeup target is resolved first, then the
// requested slots are re-checked for conflicts before insertion.
func (a *GormApplyAdapter) applyMakeupChange(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange) ([]int, []int, error) {
	target, err := resolveMakeupTarget(ctx, tx, req.UserID, change)
	if err != nil {
		return nil, nil, err
	}
	if err := ensureSlotsFree(ctx, tx, req.UserID, change); err != nil {
		return nil, nil, err
	}

	return insertTaskEventWithSchedules(ctx, tx, req, change, eventPayload{
		Name:             target.Name,
		TaskSourceType:   target.TaskSourceType,
		RelID:            target.RelID,
		MakeupForEventID: &target.MakeupForEventID,
		Sections:         change.Sections,
	})
}
|
||||||
|
|
||||||
|
// normalizedChange is an ApplyChange whose target slot has been validated and
// expanded: Week/DayOfWeek are copied from ToSlot.Start, and Sections lists
// every atomic section the change occupies (see normalizeSections).
type normalizedChange struct {
	ApplyChange
	Week      int
	DayOfWeek int
	Sections  []int
}
|
||||||
|
|
||||||
|
func normalizeRequest(req ApplyActiveScheduleRequest) ([]normalizedChange, error) {
|
||||||
|
if req.UserID <= 0 {
|
||||||
|
return nil, newApplyError(ErrorCodeInvalidRequest, "user_id 不能为空", nil)
|
||||||
|
}
|
||||||
|
if strings.TrimSpace(req.PreviewID) == "" {
|
||||||
|
return nil, newApplyError(ErrorCodeInvalidRequest, "preview_id 不能为空", nil)
|
||||||
|
}
|
||||||
|
if len(req.Changes) == 0 {
|
||||||
|
return nil, newApplyError(ErrorCodeInvalidRequest, "changes 不能为空", nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
seenSlots := make(map[string]struct{})
|
||||||
|
normalized := make([]normalizedChange, 0, len(req.Changes))
|
||||||
|
for _, change := range req.Changes {
|
||||||
|
sections, err := normalizeSections(change)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
for _, section := range sections {
|
||||||
|
key := fmt.Sprintf("%d:%d:%d", change.ToSlot.Start.Week, change.ToSlot.Start.DayOfWeek, section)
|
||||||
|
if _, exists := seenSlots[key]; exists {
|
||||||
|
return nil, newApplyError(ErrorCodeSlotConflict, "本次确认请求内部存在重复节次", nil)
|
||||||
|
}
|
||||||
|
seenSlots[key] = struct{}{}
|
||||||
|
}
|
||||||
|
normalized = append(normalized, normalizedChange{
|
||||||
|
ApplyChange: change,
|
||||||
|
Week: change.ToSlot.Start.Week,
|
||||||
|
DayOfWeek: change.ToSlot.Start.DayOfWeek,
|
||||||
|
Sections: sections,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return normalized, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeSections validates one change's target slot span and expands it
// into the ordered list of atomic sections it covers.
//
// Rules enforced here:
//   - TargetID and ToSlot must be present;
//   - the start slot must satisfy week >= 1, day 1..7, section 1..12;
//   - End may be omitted and is then derived from the duration (or defaults to
//     Start for a single-section change);
//   - Start and End must share the same week and day, End.Section must be
//     >= Start.Section and <= 12, and an explicit duration must match the span.
func normalizeSections(change ApplyChange) ([]int, error) {
	if change.TargetID <= 0 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "变更目标 ID 不能为空", nil)
	}
	if change.ToSlot == nil {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "变更缺少目标节次", nil)
	}
	start := change.ToSlot.Start
	end := change.ToSlot.End
	if start.Week <= 0 || start.DayOfWeek < 1 || start.DayOfWeek > 7 || start.Section < 1 || start.Section > 12 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标起始节次不合法", nil)
	}
	// The change-level DurationSections wins over the span-level one.
	duration := change.DurationSections
	if duration <= 0 {
		duration = change.ToSlot.DurationSections
	}
	// Derive End from the duration when the caller left End.Section unset.
	if end.Section <= 0 && duration > 0 {
		end = Slot{Week: start.Week, DayOfWeek: start.DayOfWeek, Section: start.Section + duration - 1}
	}
	// A fully zero End means a single-section change.
	if end.Week <= 0 && end.DayOfWeek <= 0 && end.Section <= 0 {
		end = start
	}
	if end.Week != start.Week || end.DayOfWeek != start.DayOfWeek || end.Section < start.Section {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标节次必须是同一天内的连续区间", nil)
	}
	if end.Section > 12 {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标结束节次不合法", nil)
	}
	actualDuration := end.Section - start.Section + 1
	if duration > 0 && duration != actualDuration {
		return nil, newApplyError(ErrorCodeInvalidEditedChanges, "duration_sections 与目标节次跨度不一致", nil)
	}
	sections := make([]int, 0, actualDuration)
	for section := start.Section; section <= end.Section; section++ {
		sections = append(sections, section)
	}
	return sections, nil
}
|
||||||
|
|
||||||
|
func isAddTaskPoolChange(change normalizedChange) bool {
|
||||||
|
if change.ChangeType == ChangeTypeAddTaskPoolToSchedule {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return change.ChangeType == changeTypeAdd && change.TargetType == TargetTypeTaskPool
|
||||||
|
}
|
||||||
|
|
||||||
|
// isCreateMakeupChange reports whether the change asks to create a make-up event.
func isCreateMakeupChange(change normalizedChange) bool {
	return change.ChangeType == ChangeTypeCreateMakeup
}
|
||||||
|
|
||||||
|
// lockTaskPool loads the user's task_pool row under SELECT ... FOR UPDATE so
// concurrent confirms on the same task serialize inside the transaction.
//
// Errors: target_not_found when the row is missing or owned by another user,
// db_error for any other read failure.
func lockTaskPool(ctx context.Context, tx *gorm.DB, userID, taskID int) (model.Task, error) {
	var task model.Task
	err := tx.WithContext(ctx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("id = ? AND user_id = ?", taskID, userID).
		First(&task).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.Task{}, newApplyError(ErrorCodeTargetNotFound, "task_pool 任务不存在或不属于当前用户", nil)
		}
		return model.Task{}, newApplyError(ErrorCodeDBError, "读取 task_pool 任务失败", err)
	}
	return task, nil
}
|
||||||
|
|
||||||
|
// ensureTaskPoolNotScheduled rejects the change when the task_pool task already
// has a task-type schedule event, so a task enters the schedule at most once.
func ensureTaskPoolNotScheduled(ctx context.Context, tx *gorm.DB, userID, taskID int) error {
	var count int64
	err := tx.WithContext(ctx).
		Model(&model.ScheduleEvent{}).
		Where("user_id = ? AND type = ? AND task_source_type = ? AND rel_id = ?", userID, scheduleEventTypeTask, TaskSourceTypeTaskPool, taskID).
		Count(&count).Error
	if err != nil {
		return newApplyError(ErrorCodeDBError, "检查 task_pool 是否已进入日程失败", err)
	}
	if count > 0 {
		return newApplyError(ErrorCodeTargetAlreadyScheduled, "task_pool 任务已进入日程", nil)
	}
	return nil
}
|
||||||
|
|
||||||
|
func ensureSlotsFree(ctx context.Context, tx *gorm.DB, userID int, change normalizedChange) error {
|
||||||
|
sections := change.Sections
|
||||||
|
if len(sections) == 0 {
|
||||||
|
return newApplyError(ErrorCodeInvalidEditedChanges, "目标节次不能为空", nil)
|
||||||
|
}
|
||||||
|
sort.Ints(sections)
|
||||||
|
startSection := sections[0]
|
||||||
|
endSection := sections[len(sections)-1]
|
||||||
|
|
||||||
|
// 1. 在事务内对目标节次加行锁,命中任何已有 schedules 都视为冲突。
|
||||||
|
// 2. 若并发事务在检查后抢先插入同一唯一键,后续 Create 会被唯一索引兜底拦截并整体回滚。
|
||||||
|
// 3. MVP 不处理课程嵌入,任何已有课程、固定日程或任务都不可覆盖。
|
||||||
|
var occupied []model.Schedule
|
||||||
|
err := tx.WithContext(ctx).
|
||||||
|
Model(&model.Schedule{}).
|
||||||
|
Clauses(clause.Locking{Strength: "UPDATE"}).
|
||||||
|
Where("user_id = ? AND week = ? AND day_of_week = ? AND section IN ?", userID, change.Week, change.DayOfWeek, sections).
|
||||||
|
Find(&occupied).Error
|
||||||
|
if err != nil {
|
||||||
|
return newApplyError(ErrorCodeDBError, "检查目标节次冲突失败", err)
|
||||||
|
}
|
||||||
|
if len(occupied) > 0 {
|
||||||
|
return newApplyError(ErrorCodeSlotConflict, fmt.Sprintf("第 %d-%d 节已被占用", startSection, endSection), nil)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// eventPayload carries everything insertTaskEventWithSchedules needs to write
// one schedule_events row plus its per-section schedules rows.
type eventPayload struct {
	Name             string
	TaskSourceType   string
	RelID            int
	MakeupForEventID *int // nil unless this event is a make-up for another event
	Sections         []int
}
|
||||||
|
|
||||||
|
// insertTaskEventWithSchedules writes one schedule_events row and one schedules
// row per target section, all inside the caller's transaction.
//
// Steps:
//  1. sort a copy of the sections and convert the week/day/section span into
//     absolute start/end times via conv.RelativeTimeToRealTime;
//  2. insert the schedule_events row, tagged with the trimmed preview ID;
//  3. insert one schedules row per section referencing the new event.
//
// Returns the new event ID (as a single-element slice) and the new schedule
// IDs. NOTE(review): assumes payload.Sections is non-empty — callers guarantee
// this via normalizeSections/ensureSlotsFree; sections[0] would panic otherwise.
func insertTaskEventWithSchedules(ctx context.Context, tx *gorm.DB, req ApplyActiveScheduleRequest, change normalizedChange, payload eventPayload) ([]int, []int, error) {
	// Sort a private copy so the payload's slice is left untouched.
	sections := append([]int(nil), payload.Sections...)
	sort.Ints(sections)
	start := sections[0]
	end := sections[len(sections)-1]
	startTime, endTime, err := conv.RelativeTimeToRealTime(change.Week, change.DayOfWeek, start, end)
	if err != nil {
		return nil, nil, newApplyError(ErrorCodeInvalidEditedChanges, "目标节次无法转换为绝对时间", err)
	}

	previewID := strings.TrimSpace(req.PreviewID)
	event := model.ScheduleEvent{
		UserID:           req.UserID,
		Name:             payload.Name,
		Type:             scheduleEventTypeTask,
		TaskSourceType:   payload.TaskSourceType,
		RelID:            &payload.RelID,
		MakeupForEventID: payload.MakeupForEventID,
		ActivePreviewID:  &previewID,
		CanBeEmbedded:    false,
		StartTime:        startTime,
		EndTime:          endTime,
	}
	if err := tx.WithContext(ctx).Create(&event).Error; err != nil {
		return nil, nil, newApplyError(ErrorCodeDBError, "写入 schedule_events 失败", err)
	}

	schedules := make([]model.Schedule, 0, len(sections))
	for _, section := range sections {
		schedules = append(schedules, model.Schedule{
			EventID:   event.ID,
			UserID:    req.UserID,
			Week:      change.Week,
			DayOfWeek: change.DayOfWeek,
			Section:   section,
			Status:    scheduleStatusNormal,
		})
	}
	if err := tx.WithContext(ctx).Create(&schedules).Error; err != nil {
		return nil, nil, newApplyError(ErrorCodeDBError, "写入 schedules 失败", err)
	}

	scheduleIDs := make([]int, 0, len(schedules))
	for _, schedule := range schedules {
		scheduleIDs = append(scheduleIDs, schedule.ID)
	}
	return []int{event.ID}, scheduleIDs, nil
}
|
||||||
|
|
||||||
|
// makeupTarget is the resolved source of a create_makeup change: the display
// name, task source type and rel ID for the new event, plus the ID of the
// original schedule event being made up for.
type makeupTarget struct {
	Name             string
	TaskSourceType   string
	RelID            int
	MakeupForEventID int
}
|
||||||
|
|
||||||
|
// resolveMakeupTarget determines which task a make-up event should reference.
//
// Resolution rules:
//   - metadata["makeup_for_event_id"] seeds the source event ID; when the
//     target type is empty or schedule_event, a positive TargetID overrides it
//     and the whole target is derived from the source event;
//   - for task_pool / task_item targets the source event must come from the
//     metadata and is locked to serialize concurrent confirms;
//   - any other target type is rejected as invalid_edited_changes.
func resolveMakeupTarget(ctx context.Context, tx *gorm.DB, userID int, change normalizedChange) (makeupTarget, error) {
	makeupForEventID := parsePositiveInt(change.Metadata["makeup_for_event_id"])
	if change.TargetType == "" || change.TargetType == TargetTypeScheduleEvent {
		if change.TargetID > 0 {
			makeupForEventID = change.TargetID
		}
		return resolveMakeupFromEvent(ctx, tx, userID, makeupForEventID)
	}
	if makeupForEventID <= 0 {
		return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "create_makeup 必须提供 makeup_for_event_id", nil)
	}
	// Lock the source event even though only its ID is used here, so concurrent
	// confirms against the same source serialize.
	if _, err := lockScheduleEvent(ctx, tx, userID, makeupForEventID); err != nil {
		return makeupTarget{}, err
	}

	switch change.TargetType {
	case TargetTypeTaskPool:
		task, err := lockTaskPool(ctx, tx, userID, change.TargetID)
		if err != nil {
			return makeupTarget{}, err
		}
		if task.IsCompleted {
			return makeupTarget{}, newApplyError(ErrorCodeTargetCompleted, "补做目标 task_pool 已完成", nil)
		}
		return makeupTarget{
			Name:             nonEmpty(task.Title, fmt.Sprintf("任务 %d", task.ID)),
			TaskSourceType:   TaskSourceTypeTaskPool,
			RelID:            task.ID,
			MakeupForEventID: makeupForEventID,
		}, nil
	case TargetTypeTaskItem:
		item, err := lockTaskItemForUser(ctx, tx, userID, change.TargetID)
		if err != nil {
			return makeupTarget{}, err
		}
		return makeupTarget{
			Name:             nonEmpty(stringPtrValue(item.Content), fmt.Sprintf("任务块 %d", item.ID)),
			TaskSourceType:   TaskSourceTypeTaskItem,
			RelID:            item.ID,
			MakeupForEventID: makeupForEventID,
		}, nil
	default:
		return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "create_makeup 目标类型不合法", nil)
	}
}
|
||||||
|
|
||||||
|
func resolveMakeupFromEvent(ctx context.Context, tx *gorm.DB, userID, eventID int) (makeupTarget, error) {
|
||||||
|
event, err := lockScheduleEvent(ctx, tx, userID, eventID)
|
||||||
|
if err != nil {
|
||||||
|
return makeupTarget{}, err
|
||||||
|
}
|
||||||
|
if event.Type != scheduleEventTypeTask || event.RelID == nil || *event.RelID <= 0 {
|
||||||
|
return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "补做来源必须是已排任务日程", nil)
|
||||||
|
}
|
||||||
|
sourceType := event.TaskSourceType
|
||||||
|
if sourceType == "" {
|
||||||
|
sourceType = TaskSourceTypeTaskItem
|
||||||
|
}
|
||||||
|
if sourceType != TaskSourceTypeTaskItem && sourceType != TaskSourceTypeTaskPool {
|
||||||
|
return makeupTarget{}, newApplyError(ErrorCodeInvalidEditedChanges, "补做来源任务类型不合法", nil)
|
||||||
|
}
|
||||||
|
return makeupTarget{
|
||||||
|
Name: nonEmpty(event.Name, fmt.Sprintf("补做任务 %d", event.ID)),
|
||||||
|
TaskSourceType: sourceType,
|
||||||
|
RelID: *event.RelID,
|
||||||
|
MakeupForEventID: event.ID,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// lockScheduleEvent loads the user's schedule_events row under
// SELECT ... FOR UPDATE, serializing concurrent confirms on the same source
// event. A non-positive eventID is rejected before touching the database.
func lockScheduleEvent(ctx context.Context, tx *gorm.DB, userID, eventID int) (model.ScheduleEvent, error) {
	if eventID <= 0 {
		return model.ScheduleEvent{}, newApplyError(ErrorCodeInvalidEditedChanges, "makeup_for_event_id 不能为空", nil)
	}
	var event model.ScheduleEvent
	err := tx.WithContext(ctx).
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("id = ? AND user_id = ?", eventID, userID).
		First(&event).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.ScheduleEvent{}, newApplyError(ErrorCodeTargetNotFound, "补做来源日程不存在或不属于当前用户", nil)
		}
		return model.ScheduleEvent{}, newApplyError(ErrorCodeDBError, "读取补做来源日程失败", err)
	}
	return event, nil
}
|
||||||
|
|
||||||
|
// lockTaskItemForUser loads a task_items row under SELECT ... FOR UPDATE.
// Ownership is enforced by joining task_classes and matching its user_id
// (the query checks the user via task_classes, not via task_items itself).
func lockTaskItemForUser(ctx context.Context, tx *gorm.DB, userID, taskItemID int) (model.TaskClassItem, error) {
	var item model.TaskClassItem
	err := tx.WithContext(ctx).
		Table("task_items").
		Select("task_items.*").
		Joins("JOIN task_classes ON task_classes.id = task_items.category_id").
		Clauses(clause.Locking{Strength: "UPDATE"}).
		Where("task_items.id = ? AND task_classes.user_id = ?", taskItemID, userID).
		First(&item).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return model.TaskClassItem{}, newApplyError(ErrorCodeTargetNotFound, "task_item 不存在或不属于当前用户", nil)
		}
		return model.TaskClassItem{}, newApplyError(ErrorCodeDBError, "读取 task_item 失败", err)
	}
	return item, nil
}
|
||||||
|
|
||||||
|
// parsePositiveInt parses value as a positive integer; blank, malformed or
// non-positive input yields 0.
func parsePositiveInt(value string) int {
	n, err := strconv.Atoi(strings.TrimSpace(value))
	if err != nil {
		return 0
	}
	if n <= 0 {
		return 0
	}
	return n
}
|
||||||
|
|
||||||
|
// nonEmpty returns value with surrounding whitespace trimmed, or fallback when
// the trimmed value is empty. (Trims once instead of twice as before.)
func nonEmpty(value, fallback string) string {
	trimmed := strings.TrimSpace(value)
	if trimmed == "" {
		return fallback
	}
	return trimmed
}
|
||||||
|
|
||||||
|
// stringPtrValue dereferences value, treating nil as the empty string.
func stringPtrValue(value *string) string {
	if value != nil {
		return *value
	}
	return ""
}
|
||||||
|
|
||||||
|
func classifyDBError(err error) error {
|
||||||
|
if err == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var applyErr *ApplyError
|
||||||
|
if errors.As(err, &applyErr) {
|
||||||
|
return applyErr
|
||||||
|
}
|
||||||
|
message := strings.ToLower(err.Error())
|
||||||
|
if strings.Contains(message, "duplicate entry") ||
|
||||||
|
strings.Contains(message, "unique constraint") ||
|
||||||
|
strings.Contains(message, "unique violation") ||
|
||||||
|
strings.Contains(message, "idx_user_slot_atomic") {
|
||||||
|
return newApplyError(ErrorCodeSlotConflict, "目标节次已被其他日程占用", err)
|
||||||
|
}
|
||||||
|
return newApplyError(ErrorCodeDBError, "主动调度正式写库失败", err)
|
||||||
|
}
|
||||||
127
backend/services/schedule/core/applyadapter/types.go
Normal file
127
backend/services/schedule/core/applyadapter/types.go
Normal file
@@ -0,0 +1,127 @@
|
|||||||
|
package applyadapter
|
||||||
|
|
||||||
|
import "time"
|
||||||
|
|
||||||
|
const (
	// Change types the apply adapter accepts.
	ChangeTypeAddTaskPoolToSchedule = "add_task_pool_to_schedule"
	ChangeTypeCreateMakeup          = "create_makeup"

	// changeTypeAdd is the generic "add" alias; combined with
	// TargetTypeTaskPool it is treated like ChangeTypeAddTaskPoolToSchedule.
	changeTypeAdd = "add"

	// Target kinds a change may reference.
	TargetTypeTaskPool      = "task_pool"
	TargetTypeTaskItem      = "task_item"
	TargetTypeScheduleEvent = "schedule_event"

	// Values written into schedule_events.type and schedules.status.
	scheduleEventTypeTask = "task"
	scheduleStatusNormal  = "normal"

	// Values written into schedule_events.task_source_type.
	TaskSourceTypeTaskPool = "task_pool"
	TaskSourceTypeTaskItem = "task_item"
)
|
||||||
|
|
||||||
|
// Error codes carried in ApplyError.Code; upstream uses them to choose the
// preview apply_error value and the user-facing copy.
const (
	ErrorCodeInvalidRequest         = "invalid_request"
	ErrorCodeUnsupportedChangeType  = "unsupported_change_type"
	ErrorCodeTargetNotFound         = "target_not_found"
	ErrorCodeTargetCompleted        = "target_completed"
	ErrorCodeTargetAlreadyScheduled = "target_already_scheduled"
	ErrorCodeSlotConflict           = "slot_conflict"
	ErrorCodeInvalidEditedChanges   = "invalid_edited_changes"
	ErrorCodeDBError                = "db_error"
)
|
||||||
|
|
||||||
|
// ApplyActiveScheduleRequest is the authoritative write request handed to the
// schedule domain after an active-scheduling confirm.
//
// Responsibility boundaries:
//  1. Carries only the user, candidate and change facts already validated by
//     the upstream preview/confirm flow;
//  2. Does not express preview-status write-back; on success the adapter only
//     returns the authoritative row IDs;
//  3. Changes may come from the original preview_changes or from the user's
//     edited_changes.
type ApplyActiveScheduleRequest struct {
	PreviewID   string
	ApplyID     string
	UserID      int
	CandidateID string
	Changes     []ApplyChange
	RequestedAt time.Time
	TraceID     string
}
|
||||||
|
|
||||||
|
// ApplyChange is the smallest change unit the apply adapter can execute.
//
// Field semantics:
//  1. ChangeType supports add_task_pool_to_schedule / create_makeup;
//  2. TargetType + TargetID describe the task source or original schedule
//     block to persist;
//  3. ToSlot is the confirmed final placement; the adapter does not trust the
//     caller's conflict check and re-queries inside the transaction.
type ApplyChange struct {
	ChangeID         string
	ChangeType       string
	TargetType       string
	TargetID         int
	ToSlot           *SlotSpan
	DurationSections int
	Metadata         map[string]string
}
|
||||||
|
|
||||||
|
// Slot addresses one atomic section cell of the schedules table.
type Slot struct {
	Week      int
	DayOfWeek int
	Section   int
}
|
||||||
|
|
||||||
|
// SlotSpan describes a contiguous block of sections.
//
// Notes:
//  1. Start is required;
//  2. End may be derived from DurationSections, but when the caller supplies
//     it, it must share Start's week and day and be contiguous;
//  3. when DurationSections <= 0, the adapter computes it from Start/End.
type SlotSpan struct {
	Start            Slot
	End              Slot
	DurationSections int
}
|
||||||
|
|
||||||
|
// ApplyActiveScheduleResult is the authoritative schedule write result.
//
// Responsibility boundaries:
//  1. AppliedEventIDs lists the schedule_events.id rows created by this apply;
//  2. AppliedScheduleIDs lists the schedules.id rows created by this apply;
//  3. No preview apply_status is included, so the adapter never writes
//     active_schedule_previews beyond its remit.
type ApplyActiveScheduleResult struct {
	ApplyID            string
	AppliedEventIDs    []int
	AppliedScheduleIDs []int
}
|
||||||
|
|
||||||
|
// ApplyError is the classified business error the adapter returns upstream.
//
// Notes:
//  1. Code lets the caller choose the preview apply_error value / UI copy;
//  2. Cause keeps the underlying error for log-based troubleshooting;
//  3. Error() targets callers and keeps the human-readable (Chinese) message.
type ApplyError struct {
	Code    string
	Message string
	Cause   error
}
|
||||||
|
|
||||||
|
func (e *ApplyError) Error() string {
|
||||||
|
if e == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
if e.Cause == nil {
|
||||||
|
return e.Message
|
||||||
|
}
|
||||||
|
return e.Message + ": " + e.Cause.Error()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ApplyError) Unwrap() error {
|
||||||
|
if e == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return e.Cause
|
||||||
|
}
|
||||||
|
|
||||||
|
func newApplyError(code, message string, cause error) error {
|
||||||
|
return &ApplyError{Code: code, Message: message, Cause: cause}
|
||||||
|
}
|
||||||
76
backend/services/schedule/dao/connect.go
Normal file
76
backend/services/schedule/dao/connect.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
package dao
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
"github.com/go-redis/redis/v8"
|
||||||
|
"github.com/spf13/viper"
|
||||||
|
"gorm.io/driver/mysql"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// OpenDBFromConfig creates the schedule service's own database handle.
//
// Responsibility boundaries:
//  1. Only migrates schedule_events / schedules, the two authoritative
//     schedule write models;
//  2. Does not migrate task, task-class, course or active-scheduler tables,
//     keeping the schedule service from managing other domains' schemas;
//  3. During the migration period it still verifies that the task / task-class
//     dependency tables exist, surfacing deployment-order problems at startup.
func OpenDBFromConfig() (*gorm.DB, error) {
	host := viper.GetString("database.host")
	port := viper.GetString("database.port")
	user := viper.GetString("database.user")
	password := viper.GetString("database.password")
	dbname := viper.GetString("database.dbname")

	dsn := fmt.Sprintf(
		"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
		user, password, host, port, dbname,
	)

	db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
	if err != nil {
		return nil, err
	}
	if err = db.AutoMigrate(&model.ScheduleEvent{}, &model.Schedule{}); err != nil {
		return nil, fmt.Errorf("auto migrate schedule tables failed: %w", err)
	}
	if err = ensureRuntimeDependencyTables(db); err != nil {
		return nil, err
	}
	return db, nil
}
|
||||||
|
|
||||||
|
// OpenRedisFromConfig creates the schedule service's own Redis handle.
//
// Responsibility boundaries:
//  1. Only initializes the Redis client required by the schedule read cache;
//  2. Creates and cleans up no business keys;
//  3. A failed Ping returns an error immediately, so a broken cache link fails
//     at startup instead of silently degrading into a hard-to-debug problem.
func OpenRedisFromConfig() (*redis.Client, error) {
	client := redis.NewClient(&redis.Options{
		Addr:     viper.GetString("redis.host") + ":" + viper.GetString("redis.port"),
		Password: viper.GetString("redis.password"),
		DB:       0,
	})
	if _, err := client.Ping(context.Background()).Result(); err != nil {
		return nil, err
	}
	return client, nil
}
|
||||||
|
|
||||||
|
// ensureRuntimeDependencyTables 显式检查迁移期仍需读取或锁定的外部表。
|
||||||
|
//
|
||||||
|
// 说明:
|
||||||
|
// 1. task_classes / task_items 支撑智能粗排、任务块撤销和补做目标校验;
|
||||||
|
// 2. tasks 支撑 active-scheduler confirm 的 task_pool 锁定;
|
||||||
|
// 3. 下一轮拆 task / task-class 后,应把这些依赖改为 RPC 或 read model,再从这里移除。
|
||||||
|
func ensureRuntimeDependencyTables(db *gorm.DB) error {
|
||||||
|
for _, table := range []string{"task_classes", "task_items", "tasks"} {
|
||||||
|
if !db.Migrator().HasTable(table) {
|
||||||
|
return fmt.Errorf("schedule runtime dependency table missing: %s", table)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
256
backend/services/schedule/dao/facts.go
Normal file
256
backend/services/schedule/dao/facts.go
Normal file
@@ -0,0 +1,256 @@
|
|||||||
|
package dao
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/conv"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetScheduleFactsByWindow reads the rolling-window schedule facts that active
// scheduling needs.
//
// Responsibility boundaries:
//  1. Only reads schedule_events / schedules and converts them into
//     cross-process facts;
//  2. Generates no candidates, writes no previews and writes no schedules;
//  3. Once active-scheduler calls this via RPC, it no longer reads window
//     facts straight from the schedule tables.
//
// NOTE(review): Events is collected from a map, so its order (and hence the
// argument order seen by firstDynamicTask) is non-deterministic between calls
// — confirm downstream consumers do not depend on event ordering.
func (d *ScheduleDAO) GetScheduleFactsByWindow(ctx context.Context, req schedulecontracts.ScheduleWindowRequest) (schedulecontracts.ScheduleWindowFacts, error) {
	if d == nil || d.db == nil {
		return schedulecontracts.ScheduleWindowFacts{}, errors.New("schedule dao 未初始化")
	}
	// A malformed window yields empty facts rather than an error.
	if req.UserID <= 0 || req.WindowStart.IsZero() || !req.WindowEnd.After(req.WindowStart) {
		return schedulecontracts.ScheduleWindowFacts{}, nil
	}

	windowSlots, err := buildWindowSlots(req.WindowStart, req.WindowEnd)
	if err != nil {
		return schedulecontracts.ScheduleWindowFacts{}, err
	}
	weeks := uniqueWeeks(windowSlots)

	// Load whole weeks, then filter per slot to the exact window below.
	var schedules []model.Schedule
	if len(weeks) > 0 {
		err = d.db.WithContext(ctx).
			Preload("Event").
			Where("user_id = ? AND week IN ?", req.UserID, weeks).
			Find(&schedules).Error
		if err != nil {
			return schedulecontracts.ScheduleWindowFacts{}, err
		}
	}

	occupiedByKey := make(map[string]model.Schedule, len(schedules))
	eventFacts := make(map[int]*schedulecontracts.ScheduleEventFact)
	targetAlreadyScheduled := false
	for _, schedule := range schedules {
		if schedule.Event == nil {
			continue
		}
		// Keep only slots whose start falls inside [WindowStart, WindowEnd).
		slot, ok := slotFromSchedule(schedule)
		if !ok || slot.StartAt.Before(req.WindowStart) || !slot.StartAt.Before(req.WindowEnd) {
			continue
		}
		occupiedByKey[slotKey(slot)] = schedule
		eventFact := eventFacts[schedule.EventID]
		if eventFact == nil {
			eventFact = scheduleToEventFact(schedule)
			eventFacts[schedule.EventID] = eventFact
		}
		eventFact.Slots = append(eventFact.Slots, slot)
		if isSameTarget(schedule.Event, req.TargetType, req.TargetID) {
			targetAlreadyScheduled = true
		}
	}

	// Partition the enumerated window slots into occupied and free.
	occupiedSlots := make([]schedulecontracts.Slot, 0, len(occupiedByKey))
	freeSlots := make([]schedulecontracts.Slot, 0, len(windowSlots))
	for _, slot := range windowSlots {
		if schedule, exists := occupiedByKey[slotKey(slot)]; exists {
			occupied, ok := slotFromSchedule(schedule)
			if ok {
				occupiedSlots = append(occupiedSlots, occupied)
			}
			continue
		}
		freeSlots = append(freeSlots, slot)
	}

	events := make([]schedulecontracts.ScheduleEventFact, 0, len(eventFacts))
	for _, fact := range eventFacts {
		events = append(events, *fact)
	}
	return schedulecontracts.ScheduleWindowFacts{
		Events:                 events,
		OccupiedSlots:          occupiedSlots,
		FreeSlots:              freeSlots,
		NextDynamicTask:        firstDynamicTask(events, req.Now),
		TargetAlreadyScheduled: targetAlreadyScheduled,
	}, nil
}
|
||||||
|
|
||||||
|
// GetFeedbackSignal reads the schedule-target facts behind an active-scheduling
// unfinished_feedback signal.
//
// Responsibility boundaries:
//  1. There is no standalone feedback table in v1, so the event is only located
//     when target_type=schedule_event;
//  2. A missing target returns found=true with TargetKnown=false, letting the
//     active-scheduler reliably re-ask the user;
//  3. Never modifies schedules or active-scheduler session state.
func (d *ScheduleDAO) GetFeedbackSignal(ctx context.Context, req schedulecontracts.FeedbackRequest) (schedulecontracts.FeedbackFact, bool, error) {
	if d == nil || d.db == nil {
		return schedulecontracts.FeedbackFact{}, false, errors.New("schedule dao 未初始化")
	}
	if req.TargetType != schedulecontracts.TargetTypeScheduleEvent || req.TargetID <= 0 {
		return unknownFeedbackTarget(req), true, nil
	}

	var event model.ScheduleEvent
	err := d.db.WithContext(ctx).
		Where("id = ? AND user_id = ?", req.TargetID, req.UserID).
		First(&event).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return unknownFeedbackTarget(req), true, nil
		}
		return schedulecontracts.FeedbackFact{}, false, err
	}
	taskItemID := 0
	if event.RelID != nil {
		taskItemID = *event.RelID
	}
	return schedulecontracts.FeedbackFact{
		FeedbackID:       firstNonEmpty(req.FeedbackID, req.IdempotencyKey),
		TargetKnown:      true,
		TargetEventID:    event.ID,
		TargetTaskItemID: taskItemID,
		TargetTitle:      event.Name,
		SubmittedAt:      time.Now(),
	}, true, nil
}
|
||||||
|
|
||||||
|
// buildWindowSlots enumerates every atomic section slot whose absolute start
// time falls inside [startAt, endAt).
//
// It walks whole calendar days, converts each day to a relative (week, day)
// pair, then expands sections 1..12 via conv and keeps only the ones inside
// the window.
func buildWindowSlots(startAt, endAt time.Time) ([]schedulecontracts.Slot, error) {
	slots := make([]schedulecontracts.Slot, 0, 24)
	for day := truncateToDate(startAt); day.Before(endAt); day = day.AddDate(0, 0, 1) {
		week, dayOfWeek, err := conv.RealDateToRelativeDate(day.Format(conv.DateFormat))
		if err != nil {
			return nil, err
		}
		for section := 1; section <= 12; section++ {
			sectionStart, sectionEnd, err := conv.RelativeTimeToRealTime(week, dayOfWeek, section, section)
			if err != nil {
				return nil, err
			}
			// Half-open window: keep startAt <= sectionStart < endAt.
			if sectionStart.Before(startAt) || !sectionStart.Before(endAt) {
				continue
			}
			slots = append(slots, schedulecontracts.Slot{
				Week:      week,
				DayOfWeek: dayOfWeek,
				Section:   section,
				StartAt:   sectionStart,
				EndAt:     sectionEnd,
			})
		}
	}
	return slots, nil
}
|
||||||
|
|
||||||
|
func slotFromSchedule(schedule model.Schedule) (schedulecontracts.Slot, bool) {
|
||||||
|
startAt, endAt, err := conv.RelativeTimeToRealTime(schedule.Week, schedule.DayOfWeek, schedule.Section, schedule.Section)
|
||||||
|
if err != nil {
|
||||||
|
return schedulecontracts.Slot{}, false
|
||||||
|
}
|
||||||
|
return schedulecontracts.Slot{
|
||||||
|
Week: schedule.Week,
|
||||||
|
DayOfWeek: schedule.DayOfWeek,
|
||||||
|
Section: schedule.Section,
|
||||||
|
StartAt: startAt,
|
||||||
|
EndAt: endAt,
|
||||||
|
}, true
|
||||||
|
}
|
||||||
|
|
||||||
|
func scheduleToEventFact(schedule model.Schedule) *schedulecontracts.ScheduleEventFact {
|
||||||
|
event := schedule.Event
|
||||||
|
relID := 0
|
||||||
|
if event.RelID != nil {
|
||||||
|
relID = *event.RelID
|
||||||
|
}
|
||||||
|
sourceType := event.TaskSourceType
|
||||||
|
if sourceType == "" && event.Type == "task" {
|
||||||
|
sourceType = schedulecontracts.TargetTypeTaskItem
|
||||||
|
}
|
||||||
|
return &schedulecontracts.ScheduleEventFact{
|
||||||
|
ID: event.ID,
|
||||||
|
UserID: event.UserID,
|
||||||
|
Title: event.Name,
|
||||||
|
SourceType: sourceType,
|
||||||
|
RelID: relID,
|
||||||
|
IsDynamicTask: event.Type == "task",
|
||||||
|
TaskItemID: relID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSameTarget(event *model.ScheduleEvent, targetType string, targetID int) bool {
|
||||||
|
if event == nil || targetID <= 0 || event.RelID == nil || event.Type != "task" {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
sourceType := event.TaskSourceType
|
||||||
|
if sourceType == "" {
|
||||||
|
sourceType = schedulecontracts.TargetTypeTaskItem
|
||||||
|
}
|
||||||
|
return sourceType == targetType && *event.RelID == targetID
|
||||||
|
}
|
||||||
|
|
||||||
|
func firstDynamicTask(events []schedulecontracts.ScheduleEventFact, now time.Time) *schedulecontracts.ScheduleEventFact {
|
||||||
|
for i := range events {
|
||||||
|
if !events[i].IsDynamicTask {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
for _, slot := range events[i].Slots {
|
||||||
|
if slot.StartAt.IsZero() || !slot.StartAt.Before(now) {
|
||||||
|
return &events[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func uniqueWeeks(slots []schedulecontracts.Slot) []int {
|
||||||
|
seen := make(map[int]struct{})
|
||||||
|
weeks := make([]int, 0)
|
||||||
|
for _, slot := range slots {
|
||||||
|
if _, exists := seen[slot.Week]; exists {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
seen[slot.Week] = struct{}{}
|
||||||
|
weeks = append(weeks, slot.Week)
|
||||||
|
}
|
||||||
|
return weeks
|
||||||
|
}
|
||||||
|
|
||||||
|
func slotKey(slot schedulecontracts.Slot) string {
|
||||||
|
return fmt.Sprintf("%d:%d:%d", slot.Week, slot.DayOfWeek, slot.Section)
|
||||||
|
}
|
||||||
|
|
||||||
|
func truncateToDate(t time.Time) time.Time {
|
||||||
|
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
|
||||||
|
}
|
||||||
|
|
||||||
|
func unknownFeedbackTarget(req schedulecontracts.FeedbackRequest) schedulecontracts.FeedbackFact {
|
||||||
|
return schedulecontracts.FeedbackFact{
|
||||||
|
FeedbackID: firstNonEmpty(req.FeedbackID, req.IdempotencyKey),
|
||||||
|
TargetKnown: false,
|
||||||
|
SubmittedAt: time.Now(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// firstNonEmpty returns the first non-empty string among its arguments, or ""
// when every argument (or the whole list) is empty.
func firstNonEmpty(values ...string) string {
	for _, candidate := range values {
		if len(candidate) > 0 {
			return candidate
		}
	}
	return ""
}
|
||||||
627
backend/services/schedule/dao/schedule.go
Normal file
627
backend/services/schedule/dao/schedule.go
Normal file
@@ -0,0 +1,627 @@
|
|||||||
|
package dao
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
|
"gorm.io/gorm"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ScheduleDAO groups all persistence operations for the schedules and
// schedule_events tables around a single gorm.DB handle.
type ScheduleDAO struct {
	db *gorm.DB
}
|
||||||
|
|
||||||
|
// NewScheduleDAO creates a ScheduleDAO bound to the given DB handle.
// (The original comment said "TaskClassDAO"; this constructor builds a ScheduleDAO.)
func NewScheduleDAO(db *gorm.DB) *ScheduleDAO {
	return &ScheduleDAO{
		db: db,
	}
}
|
||||||
|
|
||||||
|
// WithTx returns a copy of the DAO bound to the given transaction handle so
// the same queries can run inside a gorm transaction.
func (d *ScheduleDAO) WithTx(tx *gorm.DB) *ScheduleDAO {
	return &ScheduleDAO{db: tx}
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) AddSchedules(schedules []model.Schedule) ([]int, error) {
|
||||||
|
if err := d.db.Create(&schedules).Error; err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids := make([]int, len(schedules))
|
||||||
|
for i, s := range schedules {
|
||||||
|
ids[i] = s.ID
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// EmbedTaskIntoSchedule writes taskID into embedded_task_id for every
// schedules row of the given user/week/day whose section falls inside
// [startSection, endSection].
func (d *ScheduleDAO) EmbedTaskIntoSchedule(startSection, endSection, dayOfWeek, week, userID, taskID int) error {
	// Only update rows matching user / week / day-of-week / section range,
	// writing embedded_task_id precisely to taskID.
	res := d.db.
		Table("schedules").
		Where("user_id = ? AND week = ? AND day_of_week = ? AND section BETWEEN ? AND ?", userID, week, dayOfWeek, startSection, endSection).
		Update("embedded_task_id", taskID)

	return res.Error
}
|
||||||
|
|
||||||
|
// GetCourseUserIDByID returns the owning user_id of the schedule event with
// the given ID. A missing row or a NULL user_id is reported as
// respond.WrongCourseID.
func (d *ScheduleDAO) GetCourseUserIDByID(ctx context.Context, courseScheduleEventID int) (int, error) {
	// Scan into *int so a NULL column is distinguishable from 0.
	type row struct {
		UserID *int `gorm:"column:user_id"`
	}

	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("user_id").
		Where("id = ?", courseScheduleEventID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, respond.WrongCourseID
		}
		return 0, err
	}
	if r.UserID == nil {
		return 0, respond.WrongCourseID
	}
	return *r.UserID, nil
}
|
||||||
|
|
||||||
|
// IsCourseEmbeddedByOtherTaskBlock reports whether the course already has a
// task embedded within the given section range (used as a business guard).
//
// NOTE(review): the query filters schedules.id = courseID — confirm courseID
// is the schedules row ID here rather than the schedule_events ID.
func (d *ScheduleDAO) IsCourseEmbeddedByOtherTaskBlock(ctx context.Context, courseID, startSection, endSection int) (bool, error) {
	// An invalid range is treated as non-conflicting.
	if startSection <= 0 || endSection <= 0 || startSection > endSection {
		return false, nil
	}

	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("id = ?", courseID).
		Where("section BETWEEN ? AND ?", startSection, endSection).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Count(&cnt).Error
	if err != nil {
		return false, err
	}
	return cnt > 0, nil
}
|
||||||
|
|
||||||
|
// HasUserScheduleConflict reports whether the user already has any schedules
// row in the given week/day whose section intersects the provided sections.
func (d *ScheduleDAO) HasUserScheduleConflict(ctx context.Context, userID, week, dayOfWeek int, sections []int) (bool, error) {
	// No sections means no possible conflict.
	if len(sections) == 0 {
		return false, nil
	}
	// Count rows for the same user, week and day whose section overlaps.
	// Relies on table columns: user_id, week, day_of_week, section.
	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("user_id = ? AND week = ? AND day_of_week = ?", userID, week, dayOfWeek).
		Where("section IN ?", sections).
		Count(&cnt).Error
	if err != nil {
		return false, err
	}
	return cnt > 0, nil
}
|
||||||
|
|
||||||
|
// IsCourseTimeMatch reports whether the given course event has a schedules row
// for every section in [startSection, endSection] on the given week/day, i.e.
// whether the event fully covers that time window.
func (d *ScheduleDAO) IsCourseTimeMatch(ctx context.Context, courseScheduleEventID, week, dayOfWeek, startSection, endSection int) (bool, error) {
	// An invalid range never matches.
	if startSection <= 0 || endSection <= 0 || startSection > endSection {
		return false, nil
	}

	// schedule_events stores the event, schedules stores the per-section rows;
	// schedules links back via event_id.
	var cnt int64
	err := d.db.WithContext(ctx).
		Table("schedules").
		Where("event_id = ?", courseScheduleEventID).
		Where("week = ? AND day_of_week = ?", week, dayOfWeek).
		Where("section BETWEEN ? AND ?", startSection, endSection).
		Count(&cnt).Error
	if err != nil {
		return false, err
	}

	// Every section in the range must have exactly one row for a match.
	// NOTE(review): duplicate rows for a section would inflate cnt and could
	// make a partial coverage look complete — confirm uniqueness constraint.
	return cnt == int64(endSection-startSection+1), nil
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) AddScheduleEvent(scheduleEvent *model.ScheduleEvent) (int, error) {
|
||||||
|
if err := d.db.Create(&scheduleEvent).Error; err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return scheduleEvent.ID, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// CheckScheduleConflict reports whether any of the given schedules collides
// with an existing *course* slot (same user, same week, same day, overlapping
// section). Only events of type "course" count; other event types are ignored.
func (d *ScheduleDAO) CheckScheduleConflict(ctx context.Context, schedules []model.Schedule) (bool, error) {
	if len(schedules) == 0 {
		return false, nil
	}

	// Aggregate: dedupe sections per user/week/day so each group needs only
	// one query.
	type key struct {
		UserID    int
		Week      int
		DayOfWeek int
	}
	groups := make(map[key]map[int]struct{})

	for _, s := range schedules {
		// Rows with invalid base fields are skipped (treated as non-conflicting).
		if s.UserID <= 0 || s.Week <= 0 || s.DayOfWeek <= 0 || s.Section <= 0 {
			continue
		}
		k := key{UserID: s.UserID, Week: s.Week, DayOfWeek: s.DayOfWeek}
		if _, ok := groups[k]; !ok {
			groups[k] = make(map[int]struct{})
		}
		groups[k][s.Section] = struct{}{}
	}

	for k, set := range groups {
		if len(set) == 0 {
			continue
		}

		sections := make([]int, 0, len(set))
		for sec := range set {
			sections = append(sections, sec)
		}

		// Only "course" events are conflicts:
		// schedules.event_id -> schedule_events.id, filtered by schedule_events.type.
		var cnt int64
		err := d.db.WithContext(ctx).
			Table("schedules s").
			Joins("JOIN schedule_events e ON e.id = s.event_id").
			Where("s.user_id = ? AND s.week = ? AND s.day_of_week = ?", k.UserID, k.Week, k.DayOfWeek).
			Where("s.section IN ?", sections).
			Where("e.type = ?", "course").
			Count(&cnt).Error
		if err != nil {
			return false, err
		}
		if cnt > 0 {
			return true, nil
		}
	}

	return false, nil
}
|
||||||
|
|
||||||
|
// GetNonCourseScheduleConflicts finds every existing non-course schedule row
// whose event collides with any slot of newSchedules, and returns ALL slots of
// each colliding event (not just the overlapping ones) with Event preloaded.
//
// NOTE(review): the user ID is taken from newSchedules[0] — this assumes every
// entry belongs to the same user; confirm callers guarantee that.
func (d *ScheduleDAO) GetNonCourseScheduleConflicts(ctx context.Context, newSchedules []model.Schedule) ([]model.Schedule, error) {
	if len(newSchedules) == 0 {
		return nil, nil
	}

	// 1. Build a fingerprint set of the new slots for fast coordinate lookup.
	userID := newSchedules[0].UserID
	weeksMap := make(map[int]bool)
	newSlotsFingerprints := make(map[string]bool)

	for _, s := range newSchedules {
		weeksMap[s.Week] = true
		key := fmt.Sprintf("%d-%d-%d", s.Week, s.DayOfWeek, s.Section)
		newSlotsFingerprints[key] = true
	}

	weeks := make([]int, 0, len(weeksMap))
	for w := range weeksMap {
		weeks = append(weeks, w)
	}

	// 2. First pass: a narrow projection of coordinates + EventID only.
	type simpleSlot struct {
		EventID   int
		Week      int
		DayOfWeek int
		Section   int
	}
	var candidates []simpleSlot

	// Only index-covered columns are selected here, so this stays cheap.
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("schedules.event_id, schedules.week, schedules.day_of_week, schedules.section").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		Where("schedules.user_id = ? AND schedules.week IN ? AND schedule_events.type != ?", userID, weeks, "course").
		Scan(&candidates).Error

	if err != nil {
		return nil, err
	}

	// 3. Keep only the EventIDs that actually collide with a new slot.
	eventIDMap := make(map[int]bool)
	for _, s := range candidates {
		key := fmt.Sprintf("%d-%d-%d", s.Week, s.DayOfWeek, s.Section)
		if newSlotsFingerprints[key] {
			eventIDMap[s.EventID] = true
		}
	}

	if len(eventIDMap) == 0 {
		return nil, nil
	}

	// 4. Second pass: fetch every atomic slot of each colliding event.
	var ids []int
	for id := range eventIDMap {
		ids = append(ids, id)
	}

	var fullConflicts []model.Schedule
	// Preload("Event") is required so downstream DTOs can show event names.
	err = d.db.WithContext(ctx).
		Preload("Event").
		Where("event_id IN ?", ids).
		Find(&fullConflicts).Error

	return fullConflicts, err
}
|
||||||
|
// GetUserTodaySchedule returns the user's schedules rows for one day of one
// week, ordered by section, with event and embedded-task details attached.
func (d *ScheduleDAO) GetUserTodaySchedule(ctx context.Context, userID, week, dayOfWeek int) ([]model.Schedule, error) {
	var schedules []model.Schedule

	// 1. Preload("Event"): base course/task info (name, place, type).
	// 2. Preload("EmbeddedTask"): details of a task embedded in a course slot.
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Where("user_id = ? AND week = ? AND day_of_week = ?", userID, week, dayOfWeek).
		Order("section ASC").
		Find(&schedules).Error

	if err != nil {
		return nil, err
	}

	return schedules, nil
}
|
||||||
|
|
||||||
|
// GetUserWeeklySchedule returns all of the user's schedules rows for a week,
// ordered by day then section, with event and embedded-task details attached.
func (d *ScheduleDAO) GetUserWeeklySchedule(ctx context.Context, userID, week int) ([]model.Schedule, error) {
	var schedules []model.Schedule

	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Where("user_id = ? AND week = ?", userID, week).
		Order("day_of_week ASC, section ASC").
		Find(&schedules).Error

	if err != nil {
		return nil, err
	}

	return schedules, nil
}
|
||||||
|
|
||||||
|
// DeleteScheduleEventAndSchedule deletes a schedule event and its child
// schedules rows in one transaction. Children are deleted explicitly (not via
// cascade) so gorm delete hooks fire with fully populated models.
// Returns respond.WrongScheduleEventID when the event does not exist or does
// not belong to the user.
func (d *ScheduleDAO) DeleteScheduleEventAndSchedule(ctx context.Context, eventID int, userID int) error {
	return d.db.WithContext(ctx).Transaction(func(tx *gorm.DB) error {
		// Load the schedules first so the Delete carries model fields the
		// hooks read (UserID/Week).
		var schedules []model.Schedule
		if err := tx.
			Where("event_id = ? AND user_id = ?", eventID, userID).
			Find(&schedules).Error; err != nil {
			return err
		}

		// Explicitly delete the child schedules rows (fires gorm Delete
		// callbacks/plugins for schedules).
		if len(schedules) > 0 {
			if err := tx.Delete(&schedules).Error; err != nil {
				return err
			}
		}

		// Then delete the parent schedule_events row (also fires callbacks).
		res := tx.Where("id = ? AND user_id = ?", eventID, userID).
			Delete(&model.ScheduleEvent{})
		if res.Error != nil {
			return res.Error
		}
		if res.RowsAffected == 0 {
			return respond.WrongScheduleEventID
		}
		return nil
	})
}
|
||||||
|
|
||||||
|
// GetScheduleTypeByEventID returns the type of the schedule event with the
// given ID, scoped to the owning user. A missing row or NULL type maps to
// respond.WrongScheduleEventID.
func (d *ScheduleDAO) GetScheduleTypeByEventID(ctx context.Context, eventID, userID int) (string, error) {
	// Scan into *string so a NULL type is distinguishable from "".
	type row struct {
		Type *string `gorm:"column:type"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("type").
		Where("id = ? AND user_id=?", eventID, userID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return "", respond.WrongScheduleEventID // event missing or not owned by this user
		}
		return "", err
	}
	if r.Type == nil {
		return "", respond.WrongScheduleEventID
	}
	return *r.Type, nil
}
|
||||||
|
|
||||||
|
// GetScheduleEmbeddedTaskID returns the embedded task ID of the given event.
// embedded_task_id lives on the schedules table; the lowest-id non-empty row
// wins. Returns (0, nil) when the event has no embedded task at all.
func (d *ScheduleDAO) GetScheduleEmbeddedTaskID(ctx context.Context, eventID int) (int, error) {
	type row struct {
		EmbeddedTaskID *int `gorm:"column:embedded_task_id"`
	}

	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("embedded_task_id").
		Where("event_id = ?", eventID).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.EmbeddedTaskID == nil { // no embedded task
		return 0, nil
	}
	return *r.EmbeddedTaskID, nil
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) IfScheduleEventIDExists(ctx context.Context, eventID int) (bool, error) {
|
||||||
|
var count int64
|
||||||
|
err := d.db.WithContext(ctx).
|
||||||
|
Table("schedule_events").
|
||||||
|
Where("id = ?", eventID).
|
||||||
|
Count(&count).Error
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
return count > 0, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetScheduleEmbeddedTaskIDToNull detaches the task embedded in the given
// event's schedules rows, returning the task ID that was removed. Returns
// respond.TargetScheduleNotHaveEmbeddedTask when nothing was embedded.
func (d *ScheduleDAO) SetScheduleEmbeddedTaskIDToNull(ctx context.Context, eventID int) (int, error) {
	// Look up the currently embedded task first (business error if none).
	embeddedTaskID, err := d.GetScheduleEmbeddedTaskID(ctx, eventID)
	if err != nil {
		return 0, err
	}
	if embeddedTaskID == 0 {
		return 0, respond.TargetScheduleNotHaveEmbeddedTask
	}

	// Null out embedded_task_id on every schedules row of the event to undo
	// the embedding.
	res := d.db.WithContext(ctx).
		Table("schedules").
		Where("event_id = ?", eventID).
		Where("embedded_task_id IS NOT NULL AND embedded_task_id <> 0").
		Update("embedded_task_id", nil)
	if res.Error != nil {
		return 0, res.Error
	}
	if res.RowsAffected == 0 {
		return 0, respond.TargetScheduleNotHaveEmbeddedTask
	}
	return embeddedTaskID, nil
}
|
||||||
|
|
||||||
|
// FindEmbeddedTaskIDAndDeleteIt locates the schedule event that has the given
// task embedded in it and deletes that event (child schedules rows go away via
// cascade). Returns the deleted event's ID, or
// respond.TargetTaskNotEmbeddedInAnySchedule when no event embeds the task.
func (d *ScheduleDAO) FindEmbeddedTaskIDAndDeleteIt(ctx context.Context, taskID int) (int, error) {
	// 1. Find a schedules row with embedded_task_id = taskID to get event_id.
	type row struct {
		EventID *int `gorm:"column:event_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("event_id").
		Where("embedded_task_id = ?", taskID).
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.EventID == nil {
		return 0, respond.TargetTaskNotEmbeddedInAnySchedule
	}
	eventID := *r.EventID

	// 2. Delete the corresponding event (cascade removes schedules rows).
	res := d.db.WithContext(ctx).
		Table("schedule_events").
		Where("id = ?", eventID).
		Delete(&model.ScheduleEvent{})
	if res.Error != nil {
		return 0, res.Error
	}
	if res.RowsAffected == 0 {
		return 0, respond.TargetTaskNotEmbeddedInAnySchedule
	}
	return eventID, nil
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) DeleteScheduleEventByTaskItemID(ctx context.Context, taskItemID int) error {
|
||||||
|
//直接找schedule_events表中type=task且rel_id=taskItemID的记录,删除它(级联删schedules)
|
||||||
|
res := d.db.WithContext(ctx).
|
||||||
|
Table("schedule_events").
|
||||||
|
Where("type = ? AND rel_id = ?", "task", taskItemID).
|
||||||
|
Delete(&model.ScheduleEvent{})
|
||||||
|
if res.Error != nil {
|
||||||
|
return res.Error
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetUserRecentCompletedSchedules returns a page of the user's already-ended
// schedules, newest first, starting at offset index and capped at limit rows.
// A row qualifies when its event has ended AND it is either a task event or a
// course slot with an embedded task.
func (d *ScheduleDAO) GetUserRecentCompletedSchedules(ctx context.Context, nowTime time.Time, userID int, index, limit int) ([]model.Schedule, error) {
	var schedules []model.Schedule
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		// Core filter:
		// 1. owned by the user & already ended
		// 2. (the event itself is a task) OR (a course with an embedded task)
		Where("schedules.user_id = ? AND schedule_events.end_time < ? AND (schedule_events.type = ? OR schedules.embedded_task_id IS NOT NULL)",
			userID, nowTime, "task").
		Order("schedule_events.end_time DESC"). // intended to hit the end_time index
		Offset(index).
		Limit(limit).
		Find(&schedules).Error
	if err != nil {
		return nil, err
	}
	return schedules, nil
}
|
||||||
|
|
||||||
|
// GetScheduleEventWeekByID returns the week number of the event's first
// schedules row (lowest id). When the event has no rows (or week is NULL) it
// returns respond.WrongScheduleEventID.
func (d *ScheduleDAO) GetScheduleEventWeekByID(ctx context.Context, eventID int) (int, error) {
	type row struct {
		Week *int `gorm:"column:week"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedules").
		Select("week").
		Where("event_id = ?", eventID).
		Order("id ASC").
		Limit(1).
		Scan(&r).Error
	if err != nil {
		return 0, err
	}
	if r.Week == nil {
		return 0, respond.WrongScheduleEventID
	}
	return *r.Week, nil
}
|
||||||
|
|
||||||
|
// GetUserOngoingSchedule returns the user's currently-running schedules plus
// every upcoming one, ordered by start time, with event and embedded-task
// details attached.
//
// NOTE(review): the Where(...).Or(...) pair renders as "A OR B"; both branches
// repeat the user filter so no foreign rows leak, but confirm the generated
// SQL groups as intended.
func (d *ScheduleDAO) GetUserOngoingSchedule(ctx context.Context, userID int, nowTime time.Time) ([]model.Schedule, error) {
	var schedules []model.Schedule
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		// Branch 1: running right now (start <= now <= end).
		Where("schedules.user_id = ? AND schedule_events.start_time <= ? AND schedule_events.end_time >= ?",
			userID, nowTime, nowTime).
		// Branch 2: not started yet.
		Or("schedules.user_id = ? AND schedule_events.start_time > ?",
			userID, nowTime).
		Order("schedule_events.start_time ASC"). // intended to hit the start_time index
		Find(&schedules).Error
	if err != nil {
		return nil, err
	}
	return schedules, nil
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) RevocateSchedulesByEventID(ctx context.Context, eventID int) error {
|
||||||
|
// 将 schedules 表中指定 event_id 的 embedded_task_id 字段置空(用于撤销嵌入关系)
|
||||||
|
res := d.db.WithContext(ctx).
|
||||||
|
Table("schedules").
|
||||||
|
Where("event_id = ?", eventID).
|
||||||
|
Update("status", "interrupted")
|
||||||
|
if res.RowsAffected == 0 {
|
||||||
|
return respond.WrongScheduleEventID
|
||||||
|
}
|
||||||
|
return res.Error
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRelIDByScheduleEventID returns the rel_id of the schedule event with the
// given ID. A missing event maps to respond.WrongScheduleEventID; an existing
// event with NULL rel_id returns (0, nil).
func (d *ScheduleDAO) GetRelIDByScheduleEventID(ctx context.Context, eventID int) (int, error) {
	// Scan into *int so a NULL rel_id is distinguishable from 0.
	type row struct {
		RelID *int `gorm:"column:rel_id"`
	}
	var r row
	err := d.db.WithContext(ctx).
		Table("schedule_events").
		Select("rel_id").
		Where("id = ?", eventID).
		First(&r).Error
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return 0, respond.WrongScheduleEventID
		}
		return 0, err
	}
	if r.RelID == nil {
		return 0, nil
	}
	return *r.RelID, nil
}
|
||||||
|
|
||||||
|
// GetUserSchedulesByTimeRange returns the user's schedules whose events lie
// entirely inside [startTime, endTime], ordered by start time, with event and
// embedded-task details attached.
func (d *ScheduleDAO) GetUserSchedulesByTimeRange(ctx context.Context, userID int, startTime, endTime time.Time) ([]model.Schedule, error) {
	var schedules []model.Schedule
	err := d.db.WithContext(ctx).
		Preload("Event").
		Preload("EmbeddedTask").
		Joins("JOIN schedule_events ON schedule_events.id = schedules.event_id").
		Where("schedules.user_id = ? AND schedule_events.start_time >= ? AND schedule_events.end_time <= ?",
			userID, startTime, endTime).
		Order("schedule_events.start_time ASC"). // intended to hit the start_time index
		Find(&schedules).Error
	if err != nil {
		return nil, err
	}
	return schedules, nil
}
|
||||||
|
|
||||||
|
// BatchEmbedTaskIntoSchedule embeds taskItemIDs[i] into the schedules rows of
// eventIDs[i], pairwise. Events that are not of type "course" are silently
// skipped. The two slices must have equal length.
//
// NOTE(review): the loop is not wrapped in a transaction, so a mid-loop error
// leaves earlier pairs committed — confirm callers tolerate partial application.
func (d *ScheduleDAO) BatchEmbedTaskIntoSchedule(ctx context.Context, eventIDs, taskItemIDs []int) error {
	if len(eventIDs) == 0 {
		return nil
	}
	if len(eventIDs) != len(taskItemIDs) {
		return fmt.Errorf("eventIDs length != taskItemIDs length")
	}

	db := d.db.WithContext(ctx)

	for i, eventID := range eventIDs {
		taskItemID := taskItemIDs[i]

		// 1) Only course events accept an embedded task; others are skipped.
		var typ string
		if err := db.
			Table("schedule_events").
			Select("type").
			Where("id = ?", eventID).
			Scan(&typ).Error; err != nil {
			return err
		}
		if typ != "course" {
			continue
		}

		// 2) One event owns many schedules rows: write embedded_task_id to all.
		if err := db.
			Table("schedules").
			Where("event_id = ?", eventID).
			Update("embedded_task_id", taskItemID).Error; err != nil {
			return err
		}
	}

	return nil
}
|
||||||
|
|
||||||
|
func (d *ScheduleDAO) InsertScheduleEvents(ctx context.Context, events []model.ScheduleEvent) ([]int, error) {
|
||||||
|
if len(events) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
if err := d.db.WithContext(ctx).Create(&events).Error; err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
ids := make([]int, len(events))
|
||||||
|
for i, e := range events {
|
||||||
|
ids[i] = e.ID
|
||||||
|
}
|
||||||
|
return ids, nil
|
||||||
|
}
|
||||||
113
backend/services/schedule/rpc/errors.go
Normal file
113
backend/services/schedule/rpc/errors.go
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
package rpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||||
|
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Error-detail domains used in errdetails.ErrorInfo so callers can distinguish
// plain schedule errors from confirm-apply business errors.
const (
	scheduleErrorDomain      = "smartflow.schedule"
	scheduleApplyErrorDomain = "smartflow.schedule.apply"
)
|
||||||
|
|
||||||
|
// grpcErrorFromServiceError converts schedule-internal errors into gRPC
// statuses.
//
// Responsibility boundaries:
//  1. apply business errors keep their error code so active-scheduler can
//     decode them and preserve the original confirm semantics;
//  2. respond.Response keeps carrying the project-internal status/info pair;
//  3. unclassified errors expose only a generic internal error — details stay
//     in the service log.
func grpcErrorFromServiceError(err error) error {
	if err == nil {
		return nil
	}
	var applyErr *applyadapter.ApplyError
	if errors.As(err, &applyErr) {
		return grpcErrorFromApplyError(applyErr)
	}
	var resp respond.Response
	if errors.As(err, &resp) {
		return grpcErrorFromResponse(resp)
	}
	log.Printf("schedule rpc internal error: %v", err)
	return status.Error(codes.Internal, "schedule service internal error")
}
|
||||||
|
|
||||||
|
func grpcErrorFromApplyError(applyErr *applyadapter.ApplyError) error {
|
||||||
|
if applyErr == nil {
|
||||||
|
return status.Error(codes.Internal, "schedule apply error")
|
||||||
|
}
|
||||||
|
message := strings.TrimSpace(applyErr.Message)
|
||||||
|
if message == "" {
|
||||||
|
message = strings.TrimSpace(applyErr.Code)
|
||||||
|
}
|
||||||
|
st := status.New(grpcCodeFromApplyErrorCode(applyErr.Code), message)
|
||||||
|
detail := &errdetails.ErrorInfo{
|
||||||
|
Domain: scheduleApplyErrorDomain,
|
||||||
|
Reason: applyErr.Code,
|
||||||
|
Metadata: map[string]string{
|
||||||
|
"info": message,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
withDetails, err := st.WithDetails(detail)
|
||||||
|
if err != nil {
|
||||||
|
return st.Err()
|
||||||
|
}
|
||||||
|
return withDetails.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
func grpcErrorFromResponse(resp respond.Response) error {
|
||||||
|
code := grpcCodeFromRespondStatus(resp.Status)
|
||||||
|
message := strings.TrimSpace(resp.Info)
|
||||||
|
if message == "" {
|
||||||
|
message = strings.TrimSpace(resp.Status)
|
||||||
|
}
|
||||||
|
st := status.New(code, message)
|
||||||
|
detail := &errdetails.ErrorInfo{
|
||||||
|
Domain: scheduleErrorDomain,
|
||||||
|
Reason: resp.Status,
|
||||||
|
Metadata: map[string]string{
|
||||||
|
"info": resp.Info,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
withDetails, err := st.WithDetails(detail)
|
||||||
|
if err != nil {
|
||||||
|
return st.Err()
|
||||||
|
}
|
||||||
|
return withDetails.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
// grpcCodeFromApplyErrorCode maps an apply business error code onto a gRPC
// code: unknown targets -> NotFound, DB failures -> Internal, state conflicts
// -> FailedPrecondition, anything else -> InvalidArgument.
func grpcCodeFromApplyErrorCode(code string) codes.Code {
	switch strings.TrimSpace(code) {
	case applyadapter.ErrorCodeTargetNotFound:
		return codes.NotFound
	case applyadapter.ErrorCodeDBError:
		return codes.Internal
	case applyadapter.ErrorCodeTargetCompleted,
		applyadapter.ErrorCodeTargetAlreadyScheduled,
		applyadapter.ErrorCodeSlotConflict:
		return codes.FailedPrecondition
	default:
		return codes.InvalidArgument
	}
}
|
||||||
|
|
||||||
|
// grpcCodeFromRespondStatus maps a project-internal status string onto a gRPC
// code: auth-related statuses -> Unauthenticated, parameter statuses ->
// InvalidArgument, any status starting with "5" -> Internal, everything else
// -> InvalidArgument.
func grpcCodeFromRespondStatus(statusValue string) codes.Code {
	switch strings.TrimSpace(statusValue) {
	case respond.MissingToken.Status, respond.InvalidToken.Status, respond.InvalidClaims.Status,
		respond.ErrUnauthorized.Status, respond.WrongTokenType.Status, respond.UserLoggedOut.Status:
		return codes.Unauthenticated
	case respond.MissingParam.Status, respond.WrongParamType.Status, respond.ParamTooLong.Status:
		return codes.InvalidArgument
	}
	// Statuses beginning with "5" follow the HTTP-style server-error convention.
	if strings.HasPrefix(strings.TrimSpace(statusValue), "5") {
		return codes.Internal
	}
	return codes.InvalidArgument
}
|
||||||
166
backend/services/schedule/rpc/handler.go
Normal file
166
backend/services/schedule/rpc/handler.go
Normal file
@@ -0,0 +1,166 @@
|
|||||||
|
package rpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/rpc/pb"
|
||||||
|
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||||
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Handler implements the schedule gRPC server on top of ScheduleService.
// Embedding UnimplementedScheduleServer keeps it forward-compatible with
// newly added RPCs.
type Handler struct {
	pb.UnimplementedScheduleServer
	svc *schedulesv.ScheduleService
}
|
||||||
|
|
||||||
|
// NewHandler builds a Handler around the given schedule service.
func NewHandler(svc *schedulesv.ScheduleService) *Handler {
	return &Handler{svc: svc}
}
|
||||||
|
|
||||||
|
// Ping lets callers confirm at startup that the schedule zrpc server is usable.
// NOTE(review): the request parameter is *pb.StatusResponse rather than a
// dedicated request message — confirm this matches the proto definition.
func (h *Handler) Ping(ctx context.Context, req *pb.StatusResponse) (*pb.StatusResponse, error) {
	// ensureReady (defined elsewhere in this package) guards all handlers.
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	return &pb.StatusResponse{}, nil
}
|
||||||
|
|
||||||
|
// GetToday returns the user's schedule for today as a JSON payload.
func (h *Handler) GetToday(ctx context.Context, req *pb.UserRequest) (*pb.JSONResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	// jsonResponse (defined elsewhere) marshals data / maps the error.
	data, err := h.svc.GetUserTodaySchedule(ctx, int(req.UserId))
	return jsonResponse(data, err)
}
|
||||||
|
|
||||||
|
// GetWeek returns the user's schedule for the requested week as a JSON payload.
func (h *Handler) GetWeek(ctx context.Context, req *pb.WeekRequest) (*pb.JSONResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	data, err := h.svc.GetUserWeeklySchedule(ctx, int(req.UserId), int(req.Week))
	return jsonResponse(data, err)
}
|
||||||
|
|
||||||
|
// DeleteEvents deletes the schedule events described by the JSON-encoded list
// in the request. Malformed JSON is surfaced as a wrong-param-type error so
// the gateway can keep its original parameter-validation semantics.
func (h *Handler) DeleteEvents(ctx context.Context, req *pb.DeleteEventsRequest) (*pb.StatusResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	var events []schedulecontracts.UserDeleteScheduleEvent
	if err := json.Unmarshal(req.EventsJson, &events); err != nil {
		return nil, grpcErrorFromServiceError(respond.WrongParamType)
	}
	err := h.svc.DeleteScheduleEventByContract(ctx, schedulecontracts.DeleteScheduleEventsRequest{
		UserID: int(req.UserId),
		Events: events,
	})
	if err != nil {
		return nil, grpcErrorFromServiceError(err)
	}
	return &pb.StatusResponse{}, nil
}
|
||||||
|
|
||||||
|
// GetRecentCompleted returns a page (index/limit) of the user's recently
// completed schedules as a JSON payload.
func (h *Handler) GetRecentCompleted(ctx context.Context, req *pb.RecentCompletedRequest) (*pb.JSONResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	data, err := h.svc.GetUserRecentCompletedSchedules(ctx, int(req.UserId), int(req.Index), int(req.Limit))
	return jsonResponse(data, err)
}
|
||||||
|
|
||||||
|
// GetCurrent returns the user's ongoing schedule as a JSON payload.
func (h *Handler) GetCurrent(ctx context.Context, req *pb.UserRequest) (*pb.JSONResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	data, err := h.svc.GetUserOngoingSchedule(ctx, int(req.UserId))
	return jsonResponse(data, err)
}
|
||||||
|
|
||||||
|
// RevokeTaskItem revokes the user's scheduled task-class item identified by
// the given event ID; service errors are translated into gRPC statuses.
func (h *Handler) RevokeTaskItem(ctx context.Context, req *pb.RevokeTaskItemRequest) (*pb.StatusResponse, error) {
	if err := h.ensureReady(req); err != nil {
		return nil, err
	}
	if err := h.svc.RevocateUserTaskClassItem(ctx, int(req.UserId), int(req.EventId)); err != nil {
		return nil, grpcErrorFromServiceError(err)
	}
	return &pb.StatusResponse{}, nil
}
|
||||||
|
|
||||||
|
func (h *Handler) SmartPlanning(ctx context.Context, req *pb.SmartPlanningRequest) (*pb.JSONResponse, error) {
|
||||||
|
if err := h.ensureReady(req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
data, err := h.svc.SmartPlanning(ctx, int(req.UserId), int(req.TaskClassId))
|
||||||
|
return jsonResponse(data, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) SmartPlanningMulti(ctx context.Context, req *pb.SmartPlanningMultiRequest) (*pb.JSONResponse, error) {
|
||||||
|
if err := h.ensureReady(req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
taskClassIDs := make([]int, 0, len(req.TaskClassIds))
|
||||||
|
for _, id := range req.TaskClassIds {
|
||||||
|
taskClassIDs = append(taskClassIDs, int(id))
|
||||||
|
}
|
||||||
|
data, err := h.svc.SmartPlanningMulti(ctx, int(req.UserId), taskClassIDs)
|
||||||
|
return jsonResponse(data, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) GetScheduleFactsByWindow(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
|
||||||
|
if err := h.ensureReady(req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var contractReq schedulecontracts.ScheduleWindowRequest
|
||||||
|
if err := json.Unmarshal(req.PayloadJson, &contractReq); err != nil {
|
||||||
|
return nil, grpcErrorFromServiceError(respond.WrongParamType)
|
||||||
|
}
|
||||||
|
data, err := h.svc.GetScheduleFactsByWindow(ctx, contractReq)
|
||||||
|
return jsonResponse(data, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) GetFeedbackSignal(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
|
||||||
|
if err := h.ensureReady(req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var contractReq schedulecontracts.FeedbackRequest
|
||||||
|
if err := json.Unmarshal(req.PayloadJson, &contractReq); err != nil {
|
||||||
|
return nil, grpcErrorFromServiceError(respond.WrongParamType)
|
||||||
|
}
|
||||||
|
feedback, found, err := h.svc.GetFeedbackSignal(ctx, contractReq)
|
||||||
|
return jsonResponse(schedulecontracts.FeedbackResponse{Feedback: feedback, Found: found}, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) ApplyActiveScheduleChanges(ctx context.Context, req *pb.JSONRequest) (*pb.JSONResponse, error) {
|
||||||
|
if err := h.ensureReady(req); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
var contractReq schedulecontracts.ApplyActiveScheduleRequest
|
||||||
|
if err := json.Unmarshal(req.PayloadJson, &contractReq); err != nil {
|
||||||
|
return nil, grpcErrorFromServiceError(respond.WrongParamType)
|
||||||
|
}
|
||||||
|
data, err := h.svc.ApplyActiveScheduleChanges(ctx, contractReq)
|
||||||
|
return jsonResponse(data, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handler) ensureReady(req any) error {
|
||||||
|
if h == nil || h.svc == nil {
|
||||||
|
return grpcErrorFromServiceError(errors.New("schedule service dependency not initialized"))
|
||||||
|
}
|
||||||
|
if req == nil {
|
||||||
|
return grpcErrorFromServiceError(respond.MissingParam)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func jsonResponse(value any, err error) (*pb.JSONResponse, error) {
|
||||||
|
if err != nil {
|
||||||
|
return nil, grpcErrorFromServiceError(err)
|
||||||
|
}
|
||||||
|
raw, err := json.Marshal(value)
|
||||||
|
if err != nil {
|
||||||
|
return nil, grpcErrorFromServiceError(err)
|
||||||
|
}
|
||||||
|
return &pb.JSONResponse{DataJson: raw}, nil
|
||||||
|
}
|
||||||
123
backend/services/schedule/rpc/pb/schedule.pb.go
Normal file
123
backend/services/schedule/rpc/pb/schedule.pb.go
Normal file
@@ -0,0 +1,123 @@
|
|||||||
|
package pb
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
|
||||||
|
var _ = proto.Marshal
|
||||||
|
|
||||||
|
const _ = proto.ProtoPackageIsVersion3
|
||||||
|
|
||||||
|
// UserRequest identifies the user a schedule RPC operates on.
type UserRequest struct {
	// UserId is the numeric ID of the target user (proto field 1).
	UserId               int64    `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *UserRequest) Reset()         { *m = UserRequest{} }
func (m *UserRequest) String() string { return proto.CompactTextString(m) }
func (*UserRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// WeekRequest asks for a user's schedule in a specific week.
type WeekRequest struct {
	UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	// Week selects the week window; its exact numbering is defined by the
	// service layer — TODO confirm against GetWeek's implementation.
	Week                 int64    `protobuf:"varint,2,opt,name=week,proto3" json:"week,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *WeekRequest) Reset()         { *m = WeekRequest{} }
func (m *WeekRequest) String() string { return proto.CompactTextString(m) }
func (*WeekRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// DeleteEventsRequest deletes schedule events for a user.
type DeleteEventsRequest struct {
	UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	// EventsJson carries a JSON-encoded list of events to delete; the server
	// decodes it into []schedulecontracts.UserDeleteScheduleEvent.
	EventsJson           []byte   `protobuf:"bytes,2,opt,name=events_json,json=eventsJson,proto3" json:"events_json,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *DeleteEventsRequest) Reset()         { *m = DeleteEventsRequest{} }
func (m *DeleteEventsRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteEventsRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// RecentCompletedRequest pages through a user's recently completed schedules.
type RecentCompletedRequest struct {
	UserId int64 `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	// Index and Limit window the result set (pagination).
	Index                int64    `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"`
	Limit                int64    `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *RecentCompletedRequest) Reset()         { *m = RecentCompletedRequest{} }
func (m *RecentCompletedRequest) String() string { return proto.CompactTextString(m) }
func (*RecentCompletedRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// RevokeTaskItemRequest revokes the task-class item tied to one schedule event.
type RevokeTaskItemRequest struct {
	UserId               int64    `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	EventId              int64    `protobuf:"varint,2,opt,name=event_id,json=eventId,proto3" json:"event_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *RevokeTaskItemRequest) Reset()         { *m = RevokeTaskItemRequest{} }
func (m *RevokeTaskItemRequest) String() string { return proto.CompactTextString(m) }
func (*RevokeTaskItemRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// SmartPlanningRequest runs smart planning for one task class of a user.
type SmartPlanningRequest struct {
	UserId               int64    `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	TaskClassId          int64    `protobuf:"varint,2,opt,name=task_class_id,json=taskClassId,proto3" json:"task_class_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SmartPlanningRequest) Reset()         { *m = SmartPlanningRequest{} }
func (m *SmartPlanningRequest) String() string { return proto.CompactTextString(m) }
func (*SmartPlanningRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// SmartPlanningMultiRequest runs smart planning across several task classes.
type SmartPlanningMultiRequest struct {
	UserId               int64    `protobuf:"varint,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"`
	TaskClassIds         []int64  `protobuf:"varint,2,rep,packed,name=task_class_ids,json=taskClassIds,proto3" json:"task_class_ids,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *SmartPlanningMultiRequest) Reset()         { *m = SmartPlanningMultiRequest{} }
func (m *SmartPlanningMultiRequest) String() string { return proto.CompactTextString(m) }
func (*SmartPlanningMultiRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// JSONRequest is a generic envelope: the payload is an opaque JSON document
// whose concrete contract type depends on the RPC (see the handler methods).
type JSONRequest struct {
	PayloadJson          []byte   `protobuf:"bytes,1,opt,name=payload_json,json=payloadJson,proto3" json:"payload_json,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *JSONRequest) Reset()         { *m = JSONRequest{} }
func (m *JSONRequest) String() string { return proto.CompactTextString(m) }
func (*JSONRequest) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// JSONResponse is a generic envelope carrying an opaque JSON-encoded result.
type JSONResponse struct {
	DataJson             []byte   `protobuf:"bytes,1,opt,name=data_json,json=dataJson,proto3" json:"data_json,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *JSONResponse) Reset()         { *m = JSONResponse{} }
func (m *JSONResponse) String() string { return proto.CompactTextString(m) }
func (*JSONResponse) ProtoMessage()    {}
|
||||||
|
|
||||||
|
// StatusResponse is an empty success/ack message; it doubles as the Ping
// request and reply.
type StatusResponse struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
func (*StatusResponse) ProtoMessage()    {}
|
||||||
379
backend/services/schedule/rpc/pb/schedule_grpc.pb.go
Normal file
379
backend/services/schedule/rpc/pb/schedule_grpc.pb.go
Normal file
@@ -0,0 +1,379 @@
|
|||||||
|
package pb
|
||||||
|
|
||||||
|
import (
|
||||||
|
context "context"
|
||||||
|
|
||||||
|
grpc "google.golang.org/grpc"
|
||||||
|
codes "google.golang.org/grpc/codes"
|
||||||
|
status "google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Full gRPC method names for every RPC on smartflow.schedule.Schedule,
// suitable for grpc.ClientConn.Invoke and interceptor matching.
const (
	Schedule_Ping_FullMethodName                       = "/smartflow.schedule.Schedule/Ping"
	Schedule_GetToday_FullMethodName                   = "/smartflow.schedule.Schedule/GetToday"
	Schedule_GetWeek_FullMethodName                    = "/smartflow.schedule.Schedule/GetWeek"
	Schedule_DeleteEvents_FullMethodName               = "/smartflow.schedule.Schedule/DeleteEvents"
	Schedule_GetRecentCompleted_FullMethodName         = "/smartflow.schedule.Schedule/GetRecentCompleted"
	Schedule_GetCurrent_FullMethodName                 = "/smartflow.schedule.Schedule/GetCurrent"
	Schedule_RevokeTaskItem_FullMethodName             = "/smartflow.schedule.Schedule/RevokeTaskItem"
	Schedule_SmartPlanning_FullMethodName              = "/smartflow.schedule.Schedule/SmartPlanning"
	Schedule_SmartPlanningMulti_FullMethodName         = "/smartflow.schedule.Schedule/SmartPlanningMulti"
	Schedule_GetScheduleFactsByWindow_FullMethodName   = "/smartflow.schedule.Schedule/GetScheduleFactsByWindow"
	Schedule_GetFeedbackSignal_FullMethodName          = "/smartflow.schedule.Schedule/GetFeedbackSignal"
	Schedule_ApplyActiveScheduleChanges_FullMethodName = "/smartflow.schedule.Schedule/ApplyActiveScheduleChanges"
)
|
||||||
|
|
||||||
|
// ScheduleClient is the client-side API for the Schedule service. Obtain one
// with NewScheduleClient; every method performs a unary RPC.
type ScheduleClient interface {
	// Ping is a liveness probe used by startup health checks.
	Ping(ctx context.Context, in *StatusResponse, opts ...grpc.CallOption) (*StatusResponse, error)
	GetToday(ctx context.Context, in *UserRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetWeek(ctx context.Context, in *WeekRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	DeleteEvents(ctx context.Context, in *DeleteEventsRequest, opts ...grpc.CallOption) (*StatusResponse, error)
	GetRecentCompleted(ctx context.Context, in *RecentCompletedRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetCurrent(ctx context.Context, in *UserRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	RevokeTaskItem(ctx context.Context, in *RevokeTaskItemRequest, opts ...grpc.CallOption) (*StatusResponse, error)
	SmartPlanning(ctx context.Context, in *SmartPlanningRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	SmartPlanningMulti(ctx context.Context, in *SmartPlanningMultiRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	// The three JSONRequest RPCs serve the active-scheduler integration and
	// exchange contract structs as opaque JSON payloads.
	GetScheduleFactsByWindow(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	GetFeedbackSignal(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
	ApplyActiveScheduleChanges(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error)
}
|
||||||
|
|
||||||
|
type scheduleClient struct {
|
||||||
|
cc grpc.ClientConnInterface
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewScheduleClient(cc grpc.ClientConnInterface) ScheduleClient {
|
||||||
|
return &scheduleClient{cc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) Ping(ctx context.Context, in *StatusResponse, opts ...grpc.CallOption) (*StatusResponse, error) {
|
||||||
|
out := new(StatusResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_Ping_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetToday(ctx context.Context, in *UserRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetToday_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetWeek(ctx context.Context, in *WeekRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetWeek_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) DeleteEvents(ctx context.Context, in *DeleteEventsRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
|
||||||
|
out := new(StatusResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_DeleteEvents_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetRecentCompleted(ctx context.Context, in *RecentCompletedRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetRecentCompleted_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetCurrent(ctx context.Context, in *UserRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetCurrent_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) RevokeTaskItem(ctx context.Context, in *RevokeTaskItemRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
|
||||||
|
out := new(StatusResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_RevokeTaskItem_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) SmartPlanning(ctx context.Context, in *SmartPlanningRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_SmartPlanning_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) SmartPlanningMulti(ctx context.Context, in *SmartPlanningMultiRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_SmartPlanningMulti_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetScheduleFactsByWindow(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetScheduleFactsByWindow_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) GetFeedbackSignal(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_GetFeedbackSignal_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *scheduleClient) ApplyActiveScheduleChanges(ctx context.Context, in *JSONRequest, opts ...grpc.CallOption) (*JSONResponse, error) {
|
||||||
|
out := new(JSONResponse)
|
||||||
|
err := c.cc.Invoke(ctx, Schedule_ApplyActiveScheduleChanges_FullMethodName, in, out, opts...)
|
||||||
|
return out, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// ScheduleServer is the server-side API for the Schedule service. Implement it
// and register with RegisterScheduleServer; embed UnimplementedScheduleServer
// for forward compatibility.
type ScheduleServer interface {
	// Ping is a liveness probe used by startup health checks.
	Ping(context.Context, *StatusResponse) (*StatusResponse, error)
	GetToday(context.Context, *UserRequest) (*JSONResponse, error)
	GetWeek(context.Context, *WeekRequest) (*JSONResponse, error)
	DeleteEvents(context.Context, *DeleteEventsRequest) (*StatusResponse, error)
	GetRecentCompleted(context.Context, *RecentCompletedRequest) (*JSONResponse, error)
	GetCurrent(context.Context, *UserRequest) (*JSONResponse, error)
	RevokeTaskItem(context.Context, *RevokeTaskItemRequest) (*StatusResponse, error)
	SmartPlanning(context.Context, *SmartPlanningRequest) (*JSONResponse, error)
	SmartPlanningMulti(context.Context, *SmartPlanningMultiRequest) (*JSONResponse, error)
	// The three JSONRequest RPCs serve the active-scheduler integration and
	// exchange contract structs as opaque JSON payloads.
	GetScheduleFactsByWindow(context.Context, *JSONRequest) (*JSONResponse, error)
	GetFeedbackSignal(context.Context, *JSONRequest) (*JSONResponse, error)
	ApplyActiveScheduleChanges(context.Context, *JSONRequest) (*JSONResponse, error)
}
|
||||||
|
|
||||||
|
// UnimplementedScheduleServer provides default implementations that fail every
// Schedule RPC with codes.Unimplemented. Embed it in real servers so that
// adding RPCs to the service does not break compilation.
type UnimplementedScheduleServer struct{}

func (UnimplementedScheduleServer) Ping(context.Context, *StatusResponse) (*StatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented")
}

func (UnimplementedScheduleServer) GetToday(context.Context, *UserRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetToday not implemented")
}

func (UnimplementedScheduleServer) GetWeek(context.Context, *WeekRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetWeek not implemented")
}

func (UnimplementedScheduleServer) DeleteEvents(context.Context, *DeleteEventsRequest) (*StatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteEvents not implemented")
}

func (UnimplementedScheduleServer) GetRecentCompleted(context.Context, *RecentCompletedRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetRecentCompleted not implemented")
}

func (UnimplementedScheduleServer) GetCurrent(context.Context, *UserRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetCurrent not implemented")
}

func (UnimplementedScheduleServer) RevokeTaskItem(context.Context, *RevokeTaskItemRequest) (*StatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RevokeTaskItem not implemented")
}

func (UnimplementedScheduleServer) SmartPlanning(context.Context, *SmartPlanningRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SmartPlanning not implemented")
}

func (UnimplementedScheduleServer) SmartPlanningMulti(context.Context, *SmartPlanningMultiRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SmartPlanningMulti not implemented")
}

func (UnimplementedScheduleServer) GetScheduleFactsByWindow(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetScheduleFactsByWindow not implemented")
}

func (UnimplementedScheduleServer) GetFeedbackSignal(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetFeedbackSignal not implemented")
}

func (UnimplementedScheduleServer) ApplyActiveScheduleChanges(context.Context, *JSONRequest) (*JSONResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ApplyActiveScheduleChanges not implemented")
}
|
||||||
|
|
||||||
|
// RegisterScheduleServer registers srv's Schedule service implementation with
// the given gRPC registrar (typically a *grpc.Server).
func RegisterScheduleServer(s grpc.ServiceRegistrar, srv ScheduleServer) {
	s.RegisterService(&Schedule_ServiceDesc, srv)
}
|
||||||
|
|
||||||
|
// _Schedule_Ping_Handler decodes the request and dispatches Schedule.Ping,
// routing through the server interceptor when one is configured.
func _Schedule_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(StatusResponse)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).Ping(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_Ping_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).Ping(ctx, req.(*StatusResponse))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetToday_Handler decodes the request and dispatches
// Schedule.GetToday, routing through the server interceptor when configured.
func _Schedule_GetToday_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UserRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetToday(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetToday_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetToday(ctx, req.(*UserRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetWeek_Handler decodes the request and dispatches
// Schedule.GetWeek, routing through the server interceptor when configured.
func _Schedule_GetWeek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(WeekRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetWeek(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetWeek_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetWeek(ctx, req.(*WeekRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_DeleteEvents_Handler decodes the request and dispatches
// Schedule.DeleteEvents, routing through the interceptor when configured.
func _Schedule_DeleteEvents_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteEventsRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).DeleteEvents(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_DeleteEvents_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).DeleteEvents(ctx, req.(*DeleteEventsRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetRecentCompleted_Handler decodes the request and dispatches
// Schedule.GetRecentCompleted, routing through the interceptor when configured.
func _Schedule_GetRecentCompleted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RecentCompletedRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetRecentCompleted(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetRecentCompleted_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetRecentCompleted(ctx, req.(*RecentCompletedRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetCurrent_Handler decodes the request and dispatches
// Schedule.GetCurrent, routing through the interceptor when configured.
func _Schedule_GetCurrent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UserRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetCurrent(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetCurrent_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetCurrent(ctx, req.(*UserRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_RevokeTaskItem_Handler decodes the request and dispatches
// Schedule.RevokeTaskItem, routing through the interceptor when configured.
func _Schedule_RevokeTaskItem_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(RevokeTaskItemRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).RevokeTaskItem(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_RevokeTaskItem_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).RevokeTaskItem(ctx, req.(*RevokeTaskItemRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_SmartPlanning_Handler decodes the request and dispatches
// Schedule.SmartPlanning, routing through the interceptor when configured.
func _Schedule_SmartPlanning_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SmartPlanningRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).SmartPlanning(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_SmartPlanning_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).SmartPlanning(ctx, req.(*SmartPlanningRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_SmartPlanningMulti_Handler decodes the request and dispatches
// Schedule.SmartPlanningMulti, routing through the interceptor when configured.
func _Schedule_SmartPlanningMulti_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(SmartPlanningMultiRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).SmartPlanningMulti(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_SmartPlanningMulti_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).SmartPlanningMulti(ctx, req.(*SmartPlanningMultiRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetScheduleFactsByWindow_Handler decodes the request and
// dispatches Schedule.GetScheduleFactsByWindow, routing through the
// interceptor when configured.
func _Schedule_GetScheduleFactsByWindow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(JSONRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetScheduleFactsByWindow(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetScheduleFactsByWindow_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetScheduleFactsByWindow(ctx, req.(*JSONRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_GetFeedbackSignal_Handler decodes the request and dispatches
// Schedule.GetFeedbackSignal, routing through the interceptor when configured.
func _Schedule_GetFeedbackSignal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(JSONRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).GetFeedbackSignal(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_GetFeedbackSignal_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).GetFeedbackSignal(ctx, req.(*JSONRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// _Schedule_ApplyActiveScheduleChanges_Handler decodes the request and
// dispatches Schedule.ApplyActiveScheduleChanges, routing through the
// interceptor when configured.
func _Schedule_ApplyActiveScheduleChanges_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(JSONRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ScheduleServer).ApplyActiveScheduleChanges(ctx, in)
	}
	info := &grpc.UnaryServerInfo{Server: srv, FullMethod: Schedule_ApplyActiveScheduleChanges_FullMethodName}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ScheduleServer).ApplyActiveScheduleChanges(ctx, req.(*JSONRequest))
	}
	return interceptor(ctx, in, info, handler)
}
|
||||||
|
|
||||||
|
// Schedule_ServiceDesc describes the Schedule service for grpc.ServiceRegistrar:
// it maps every unary method name to its dispatch shim. The service defines no
// streaming RPCs.
var Schedule_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "smartflow.schedule.Schedule",
	HandlerType: (*ScheduleServer)(nil),
	Methods: []grpc.MethodDesc{
		{MethodName: "Ping", Handler: _Schedule_Ping_Handler},
		{MethodName: "GetToday", Handler: _Schedule_GetToday_Handler},
		{MethodName: "GetWeek", Handler: _Schedule_GetWeek_Handler},
		{MethodName: "DeleteEvents", Handler: _Schedule_DeleteEvents_Handler},
		{MethodName: "GetRecentCompleted", Handler: _Schedule_GetRecentCompleted_Handler},
		{MethodName: "GetCurrent", Handler: _Schedule_GetCurrent_Handler},
		{MethodName: "RevokeTaskItem", Handler: _Schedule_RevokeTaskItem_Handler},
		{MethodName: "SmartPlanning", Handler: _Schedule_SmartPlanning_Handler},
		{MethodName: "SmartPlanningMulti", Handler: _Schedule_SmartPlanningMulti_Handler},
		{MethodName: "GetScheduleFactsByWindow", Handler: _Schedule_GetScheduleFactsByWindow_Handler},
		{MethodName: "GetFeedbackSignal", Handler: _Schedule_GetFeedbackSignal_Handler},
		{MethodName: "ApplyActiveScheduleChanges", Handler: _Schedule_ApplyActiveScheduleChanges_Handler},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "schedule.proto",
}
|
||||||
66
backend/services/schedule/rpc/schedule.proto
Normal file
66
backend/services/schedule/rpc/schedule.proto
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package smartflow.schedule;
|
||||||
|
|
||||||
|
option go_package = "github.com/LoveLosita/smartflow/backend/services/schedule/rpc/pb";
|
||||||
|
|
||||||
|
service Schedule {
|
||||||
|
rpc Ping(StatusResponse) returns (StatusResponse);
|
||||||
|
rpc GetToday(UserRequest) returns (JSONResponse);
|
||||||
|
rpc GetWeek(WeekRequest) returns (JSONResponse);
|
||||||
|
rpc DeleteEvents(DeleteEventsRequest) returns (StatusResponse);
|
||||||
|
rpc GetRecentCompleted(RecentCompletedRequest) returns (JSONResponse);
|
||||||
|
rpc GetCurrent(UserRequest) returns (JSONResponse);
|
||||||
|
rpc RevokeTaskItem(RevokeTaskItemRequest) returns (StatusResponse);
|
||||||
|
rpc SmartPlanning(SmartPlanningRequest) returns (JSONResponse);
|
||||||
|
rpc SmartPlanningMulti(SmartPlanningMultiRequest) returns (JSONResponse);
|
||||||
|
rpc GetScheduleFactsByWindow(JSONRequest) returns (JSONResponse);
|
||||||
|
rpc GetFeedbackSignal(JSONRequest) returns (JSONResponse);
|
||||||
|
rpc ApplyActiveScheduleChanges(JSONRequest) returns (JSONResponse);
|
||||||
|
}
|
||||||
|
|
||||||
|
message UserRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message WeekRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
int64 week = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message DeleteEventsRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
bytes events_json = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RecentCompletedRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
int64 index = 2;
|
||||||
|
int64 limit = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message RevokeTaskItemRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
int64 event_id = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SmartPlanningRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
int64 task_class_id = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SmartPlanningMultiRequest {
|
||||||
|
int64 user_id = 1;
|
||||||
|
repeated int64 task_class_ids = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message JSONRequest {
|
||||||
|
bytes payload_json = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message JSONResponse {
|
||||||
|
bytes data_json = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message StatusResponse {
|
||||||
|
}
|
||||||
60
backend/services/schedule/rpc/server.go
Normal file
60
backend/services/schedule/rpc/server.go
Normal file
@@ -0,0 +1,60 @@
|
|||||||
|
package rpc
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/rpc/pb"
|
||||||
|
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||||
|
"github.com/zeromicro/go-zero/core/service"
|
||||||
|
"github.com/zeromicro/go-zero/zrpc"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultListenOn = "0.0.0.0:9084"
|
||||||
|
defaultTimeout = 6 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type ServerOptions struct {
|
||||||
|
ListenOn string
|
||||||
|
Timeout time.Duration
|
||||||
|
Service *schedulesv.ScheduleService
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewServer 创建 schedule zrpc 服务端。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 只负责 zrpc server 配置与 gRPC handler 注册;
|
||||||
|
// 2. 不创建数据库、Redis 或业务服务,它们由 cmd/schedule 管理;
|
||||||
|
// 3. 返回 listenOn 供进程入口打印启动日志。
|
||||||
|
func NewServer(opts ServerOptions) (*zrpc.RpcServer, string, error) {
|
||||||
|
if opts.Service == nil {
|
||||||
|
return nil, "", errors.New("schedule service dependency not initialized")
|
||||||
|
}
|
||||||
|
|
||||||
|
listenOn := strings.TrimSpace(opts.ListenOn)
|
||||||
|
if listenOn == "" {
|
||||||
|
listenOn = defaultListenOn
|
||||||
|
}
|
||||||
|
timeout := opts.Timeout
|
||||||
|
if timeout <= 0 {
|
||||||
|
timeout = defaultTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
server, err := zrpc.NewServer(zrpc.RpcServerConf{
|
||||||
|
ServiceConf: service.ServiceConf{
|
||||||
|
Name: "schedule.rpc",
|
||||||
|
Mode: service.DevMode,
|
||||||
|
},
|
||||||
|
ListenOn: listenOn,
|
||||||
|
Timeout: int64(timeout / time.Millisecond),
|
||||||
|
}, func(grpcServer *grpc.Server) {
|
||||||
|
pb.RegisterScheduleServer(grpcServer, NewHandler(opts.Service))
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, "", err
|
||||||
|
}
|
||||||
|
return server, listenOn, nil
|
||||||
|
}
|
||||||
114
backend/services/schedule/sv/contracts.go
Normal file
114
backend/services/schedule/sv/contracts.go
Normal file
@@ -0,0 +1,114 @@
|
|||||||
|
package sv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
|
||||||
|
rootmodel "github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||||
|
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DeleteScheduleEventByContract 把跨进程删除契约转换为既有 schedule 核心逻辑入参。
|
||||||
|
func (ss *ScheduleService) DeleteScheduleEventByContract(ctx context.Context, req schedulecontracts.DeleteScheduleEventsRequest) error {
|
||||||
|
events := make([]rootmodel.UserDeleteScheduleEvent, 0, len(req.Events))
|
||||||
|
for _, event := range req.Events {
|
||||||
|
events = append(events, rootmodel.UserDeleteScheduleEvent{
|
||||||
|
ID: event.ID,
|
||||||
|
DeleteCourse: event.DeleteCourse,
|
||||||
|
DeleteEmbeddedTask: event.DeleteEmbeddedTask,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return ss.DeleteScheduleEvent(ctx, events, req.UserID)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetScheduleFactsByWindow 暴露主动调度需要的滚动窗口日程事实。
|
||||||
|
func (ss *ScheduleService) GetScheduleFactsByWindow(ctx context.Context, req schedulecontracts.ScheduleWindowRequest) (schedulecontracts.ScheduleWindowFacts, error) {
|
||||||
|
if ss == nil || ss.scheduleDAO == nil {
|
||||||
|
return schedulecontracts.ScheduleWindowFacts{}, errors.New("schedule facts service 未初始化")
|
||||||
|
}
|
||||||
|
return ss.scheduleDAO.GetScheduleFactsByWindow(ctx, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetFeedbackSignal 暴露主动调度 unfinished_feedback 的日程目标定位事实。
|
||||||
|
func (ss *ScheduleService) GetFeedbackSignal(ctx context.Context, req schedulecontracts.FeedbackRequest) (schedulecontracts.FeedbackFact, bool, error) {
|
||||||
|
if ss == nil || ss.scheduleDAO == nil {
|
||||||
|
return schedulecontracts.FeedbackFact{}, false, errors.New("schedule feedback service 未初始化")
|
||||||
|
}
|
||||||
|
return ss.scheduleDAO.GetFeedbackSignal(ctx, req)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ApplyActiveScheduleChanges 在 schedule 服务内执行主动调度正式写入。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 只把 shared 契约转换为 schedule 私有 applyadapter 入参;
|
||||||
|
// 2. 具体事务、锁定、冲突检查和写库仍由搬运后的 applyadapter 负责;
|
||||||
|
// 3. 返回结果只包含正式落库 ID,不回写 active-scheduler preview 状态。
|
||||||
|
func (ss *ScheduleService) ApplyActiveScheduleChanges(ctx context.Context, req schedulecontracts.ApplyActiveScheduleRequest) (schedulecontracts.ApplyActiveScheduleResult, error) {
|
||||||
|
if ss == nil || ss.applyAdapter == nil {
|
||||||
|
return schedulecontracts.ApplyActiveScheduleResult{}, errors.New("schedule apply adapter 未初始化")
|
||||||
|
}
|
||||||
|
result, err := ss.applyAdapter.ApplyActiveScheduleChanges(ctx, toAdapterApplyRequest(req))
|
||||||
|
if err != nil {
|
||||||
|
return schedulecontracts.ApplyActiveScheduleResult{}, err
|
||||||
|
}
|
||||||
|
return schedulecontracts.ApplyActiveScheduleResult{
|
||||||
|
ApplyID: result.ApplyID,
|
||||||
|
AppliedEventIDs: result.AppliedEventIDs,
|
||||||
|
AppliedScheduleIDs: result.AppliedScheduleIDs,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func toAdapterApplyRequest(req schedulecontracts.ApplyActiveScheduleRequest) applyadapter.ApplyActiveScheduleRequest {
|
||||||
|
changes := make([]applyadapter.ApplyChange, 0, len(req.Changes))
|
||||||
|
for _, change := range req.Changes {
|
||||||
|
changes = append(changes, applyadapter.ApplyChange{
|
||||||
|
ChangeID: change.ChangeID,
|
||||||
|
ChangeType: change.ChangeType,
|
||||||
|
TargetType: change.TargetType,
|
||||||
|
TargetID: change.TargetID,
|
||||||
|
ToSlot: toAdapterSlotSpan(change.ToSlot),
|
||||||
|
DurationSections: change.DurationSections,
|
||||||
|
Metadata: cloneStringMap(change.Metadata),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
return applyadapter.ApplyActiveScheduleRequest{
|
||||||
|
PreviewID: req.PreviewID,
|
||||||
|
ApplyID: req.ApplyID,
|
||||||
|
UserID: req.UserID,
|
||||||
|
CandidateID: req.CandidateID,
|
||||||
|
Changes: changes,
|
||||||
|
RequestedAt: req.RequestedAt,
|
||||||
|
TraceID: req.TraceID,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func toAdapterSlotSpan(span *schedulecontracts.SlotSpan) *applyadapter.SlotSpan {
|
||||||
|
if span == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &applyadapter.SlotSpan{
|
||||||
|
Start: applyadapter.Slot{
|
||||||
|
Week: span.Start.Week,
|
||||||
|
DayOfWeek: span.Start.DayOfWeek,
|
||||||
|
Section: span.Start.Section,
|
||||||
|
},
|
||||||
|
End: applyadapter.Slot{
|
||||||
|
Week: span.End.Week,
|
||||||
|
DayOfWeek: span.End.DayOfWeek,
|
||||||
|
Section: span.End.Section,
|
||||||
|
},
|
||||||
|
DurationSections: span.DurationSections,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func cloneStringMap(input map[string]string) map[string]string {
|
||||||
|
if len(input) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
output := make(map[string]string, len(input))
|
||||||
|
for key, value := range input {
|
||||||
|
output[key] = value
|
||||||
|
}
|
||||||
|
return output
|
||||||
|
}
|
||||||
881
backend/services/schedule/sv/service.go
Normal file
881
backend/services/schedule/sv/service.go
Normal file
@@ -0,0 +1,881 @@
|
|||||||
|
package sv
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"log"
|
||||||
|
"sort"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/LoveLosita/smartflow/backend/conv"
|
||||||
|
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/logic"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/model"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/respond"
|
||||||
|
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||||
|
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||||
|
"github.com/go-redis/redis/v8"
|
||||||
|
)
|
||||||
|
|
||||||
|
type ScheduleService struct {
|
||||||
|
scheduleDAO *scheduledao.ScheduleDAO
|
||||||
|
taskClassDAO *rootdao.TaskClassDAO
|
||||||
|
repoManager *rootdao.RepoManager // 统一管理多个 DAO 的事务
|
||||||
|
cacheDAO *rootdao.CacheDAO // 需要在 ScheduleService 中使用缓存
|
||||||
|
applyAdapter *applyadapter.GormApplyAdapter
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewScheduleService(scheduleDAO *scheduledao.ScheduleDAO, taskClassDAO *rootdao.TaskClassDAO, repoManager *rootdao.RepoManager, cacheDAO *rootdao.CacheDAO) *ScheduleService {
|
||||||
|
return &ScheduleService{
|
||||||
|
scheduleDAO: scheduleDAO,
|
||||||
|
taskClassDAO: taskClassDAO,
|
||||||
|
repoManager: repoManager,
|
||||||
|
cacheDAO: cacheDAO,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetApplyAdapter 注入正式日程 apply 端口。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 只用于 schedule 独立服务接管 active-scheduler confirm/apply 写入;
|
||||||
|
// 2. 不改变既有 HTTP schedule 读写接口;
|
||||||
|
// 3. 未注入时 apply RPC 会明确返回初始化错误,避免静默写回旧路径。
|
||||||
|
func (ss *ScheduleService) SetApplyAdapter(applyAdapter *applyadapter.GormApplyAdapter) {
|
||||||
|
if ss != nil {
|
||||||
|
ss.applyAdapter = applyAdapter
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) GetUserTodaySchedule(ctx context.Context, userID int) ([]model.UserTodaySchedule, error) {
|
||||||
|
//1.先尝试从缓存获取数据
|
||||||
|
cachedResp, err := ss.cacheDAO.GetUserTodayScheduleFromCache(ctx, userID)
|
||||||
|
if err == nil {
|
||||||
|
// 缓存命中,直接返回
|
||||||
|
return cachedResp, nil
|
||||||
|
}
|
||||||
|
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||||
|
if !errors.Is(err, redis.Nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//2.获取当前日期
|
||||||
|
/*curTime := time.Now().Format("2006-01-02")*/
|
||||||
|
curTime := "2026-03-02" //测试数据
|
||||||
|
week, dayOfWeek, err := conv.RealDateToRelativeDate(curTime)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//3.查询用户当天的日程安排
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserTodaySchedule(ctx, userID, week, dayOfWeek) //测试数据
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//4.转换为前端需要的格式
|
||||||
|
todaySchedules := conv.SchedulesToUserTodaySchedule(schedules)
|
||||||
|
//5.将查询结果存入缓存,设置过期时间为当天结束
|
||||||
|
err = ss.cacheDAO.SetUserTodayScheduleToCache(ctx, userID, todaySchedules)
|
||||||
|
return todaySchedules, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) GetUserWeeklySchedule(ctx context.Context, userID, week int) (*model.UserWeekSchedule, error) {
|
||||||
|
//1.先检查 week 参数是否合法
|
||||||
|
if week < 0 || week > 25 {
|
||||||
|
return nil, respond.WeekOutOfRange
|
||||||
|
}
|
||||||
|
//2.先看看缓存里有没有数据(如果有的话直接返回,没有的话继续查库)
|
||||||
|
cachedResp, err := ss.cacheDAO.GetUserWeeklyScheduleFromCache(ctx, userID, week)
|
||||||
|
if err == nil {
|
||||||
|
// 缓存命中,直接返回
|
||||||
|
return cachedResp, nil
|
||||||
|
}
|
||||||
|
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||||
|
if !errors.Is(err, redis.Nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//3.查询用户每周的日程安排
|
||||||
|
//如果没有传入 week 参数,则默认查询当前周的日程安排
|
||||||
|
if week == 0 {
|
||||||
|
curTime := time.Now().Format("2006-01-02")
|
||||||
|
var err error
|
||||||
|
week, _, err = conv.RealDateToRelativeDate(curTime)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserWeeklySchedule(ctx, userID, week)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//3.转换为前端需要的格式
|
||||||
|
weeklySchedule := conv.SchedulesToUserWeeklySchedule(schedules)
|
||||||
|
weeklySchedule.Week = week
|
||||||
|
//4.将查询结果存入缓存,设置过期时间为一周(或者根据实际情况调整)
|
||||||
|
err = ss.cacheDAO.SetUserWeeklyScheduleToCache(ctx, userID, weeklySchedule)
|
||||||
|
return weeklySchedule, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) DeleteScheduleEvent(ctx context.Context, requests []model.UserDeleteScheduleEvent, userID int) error {
|
||||||
|
err := ss.repoManager.Transaction(ctx, func(txM *rootdao.RepoManager) error {
|
||||||
|
for _, req := range requests {
|
||||||
|
//1.如果要删课程和嵌入的事件
|
||||||
|
if req.DeleteEmbeddedTask && req.DeleteCourse {
|
||||||
|
//通过schedule表的embedded_task_id字段找到对应的task_id
|
||||||
|
taskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//再将task_items表中对应的embedded_time字段设置为null
|
||||||
|
if taskID != 0 {
|
||||||
|
err = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//再删除课程事件和嵌入的事件(通过级联删除实现)
|
||||||
|
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
//2.只删课程/事件
|
||||||
|
if req.DeleteCourse {
|
||||||
|
//2.1.检查课程是否有嵌入的任务事件
|
||||||
|
exists, err := txM.Schedule.IfScheduleEventIDExists(ctx, req.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !exists {
|
||||||
|
return respond.WrongScheduleEventID
|
||||||
|
}
|
||||||
|
embeddedTaskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//2.2.如果有,则需另外为其创建新的scheduleEvent(type=task)
|
||||||
|
//课程事件先删除后再创建任务事件
|
||||||
|
if embeddedTaskID != 0 {
|
||||||
|
//2.2.1.先通过id取出taskClassItem详情
|
||||||
|
taskClassItem, err := txM.TaskClass.GetTaskClassItemByID(ctx, embeddedTaskID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//下方开启事务,删除课程事件并创建新的任务事件
|
||||||
|
//2.2.2.删除课程事件
|
||||||
|
txErr := txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
//2.2.3.再复用代码创建新的scheduleEvent,下方代码改编自AddTaskClassItemIntoSchedule函数
|
||||||
|
//直接构造Schedule模型
|
||||||
|
sections := make([]int, 0, taskClassItem.EmbeddedTime.SectionTo-taskClassItem.EmbeddedTime.SectionFrom+1)
|
||||||
|
// 这里的 req 主要是为了传递 Week 和 DayOfWeek,其他字段不需要了
|
||||||
|
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(
|
||||||
|
&model.UserInsertTaskClassItemToScheduleRequest{
|
||||||
|
Week: taskClassItem.EmbeddedTime.Week,
|
||||||
|
DayOfWeek: taskClassItem.EmbeddedTime.DayOfWeek},
|
||||||
|
taskClassItem, nil, userID, taskClassItem.EmbeddedTime.SectionFrom, taskClassItem.EmbeddedTime.SectionTo)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//将节次区间转换为节次切片,方便后续检查冲突
|
||||||
|
for section := taskClassItem.EmbeddedTime.SectionFrom; section <= taskClassItem.EmbeddedTime.SectionTo; section++ {
|
||||||
|
sections = append(sections, section)
|
||||||
|
}
|
||||||
|
//单用户不存在删除时这个格子被占用的情况,所以不检查冲突了
|
||||||
|
/*//4.1 统一检查冲突(避免逐条查库)
|
||||||
|
conflict, err := ss.scheduleDAO.HasUserScheduleConflict(ctx, userID, req.Week, req.DayOfWeek, sections)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if conflict {
|
||||||
|
return respond.ScheduleConflict
|
||||||
|
}*/
|
||||||
|
// 5. 写入数据库(通过 RepoManager 统一管理事务)
|
||||||
|
// 这里的 sv.daoManager 是你在初始化 Service 时注入的全局 RepoManager 实例
|
||||||
|
// 5.1 使用事务中的 ScheduleRepo 插入 Event
|
||||||
|
eventID, txErr := txM.Schedule.AddScheduleEvent(scheduleEvent)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr // 触发回滚
|
||||||
|
}
|
||||||
|
// 5.2 关联 ID(纯内存操作,无需 tx)
|
||||||
|
for i := range schedules {
|
||||||
|
schedules[i].EventID = eventID
|
||||||
|
}
|
||||||
|
// 5.3 使用事务中的 ScheduleRepo 批量插入原子槽位
|
||||||
|
if _, txErr = txM.Schedule.AddSchedules(schedules); txErr != nil {
|
||||||
|
return txErr // 触发回滚
|
||||||
|
}
|
||||||
|
// 5.4 使用事务中的 TaskRepo 更新任务状态
|
||||||
|
if txErr = txM.TaskClass.UpdateTaskClassItemEmbeddedTime(ctx, embeddedTaskID, taskClassItem.EmbeddedTime); txErr != nil {
|
||||||
|
return txErr // 触发回滚
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
//2.3.如果没有嵌入的事件,就直接删除课程事件
|
||||||
|
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//先通过rel_id找到对应的task_id
|
||||||
|
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, req.ID)
|
||||||
|
if txErr != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//2.4.如果是任务块,转而去清除task_items表中的嵌入时间
|
||||||
|
if taskID != 0 {
|
||||||
|
//再将task_items表中对应的embedded_time字段设置为null
|
||||||
|
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
//3.只删嵌入的事件
|
||||||
|
if req.DeleteEmbeddedTask {
|
||||||
|
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||||
|
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||||
|
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, req.ID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
//3.2.再将task_items表的embedded_time字段设置为null
|
||||||
|
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) GetUserRecentCompletedSchedules(ctx context.Context, userID, index, limit int) (*model.UserRecentCompletedScheduleResponse, error) {
|
||||||
|
//1.先查缓存
|
||||||
|
cachedResp, err := ss.cacheDAO.GetUserRecentCompletedSchedulesFromCache(ctx, userID, index, limit)
|
||||||
|
if err == nil {
|
||||||
|
// 缓存命中,直接返回
|
||||||
|
return cachedResp, nil
|
||||||
|
}
|
||||||
|
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||||
|
if !errors.Is(err, redis.Nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//2.查询用户最近完成的日程安排
|
||||||
|
//获取现在的时间
|
||||||
|
/*nowTime := time.Now()*/
|
||||||
|
nowTime := time.Date(2026, 6, 30, 12, 0, 0, 0, time.Local) //测试数据
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserRecentCompletedSchedules(ctx, nowTime, userID, index, limit)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//3.转换为前端需要的格式
|
||||||
|
result := conv.SchedulesToRecentCompletedSchedules(schedules)
|
||||||
|
//4.将查询结果存入缓存,设置过期时间为30分钟(根据实际情况调整)
|
||||||
|
err = ss.cacheDAO.SetUserRecentCompletedSchedulesToCache(ctx, userID, index, limit, result)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) GetUserOngoingSchedule(ctx context.Context, userID int) (*model.OngoingSchedule, error) {
|
||||||
|
//1.先查缓存
|
||||||
|
cachedResp, err := ss.cacheDAO.GetUserOngoingScheduleFromCache(ctx, userID)
|
||||||
|
if err == nil && cachedResp == nil {
|
||||||
|
// 之前缓存过没有正在进行的日程,直接返回 nil
|
||||||
|
return nil, respond.NoOngoingOrUpcomingSchedule
|
||||||
|
}
|
||||||
|
if err == nil {
|
||||||
|
// 缓存命中,直接返回
|
||||||
|
return cachedResp, nil
|
||||||
|
}
|
||||||
|
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||||
|
if !errors.Is(err, redis.Nil) {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//2.查询用户正在进行的日程安排
|
||||||
|
/*nowTime := time.Now()*/
|
||||||
|
nowTime := time.Date(2026, 6, 30, 18, 50, 0, 0, time.Local) //测试数据
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserOngoingSchedule(ctx, userID, nowTime)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//3.转换为前端需要的格式
|
||||||
|
result := conv.SchedulesToUserOngoingSchedule(schedules)
|
||||||
|
if result != nil {
|
||||||
|
if result.StartTime.After(nowTime) {
|
||||||
|
result.TimeStatus = "upcoming"
|
||||||
|
} else {
|
||||||
|
result.TimeStatus = "ongoing"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
//4.将查询结果存入缓存,设置过期时间直到此任务结束(根据实际情况调整)
|
||||||
|
err = ss.cacheDAO.SetUserOngoingScheduleToCache(ctx, userID, result)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if result == nil {
|
||||||
|
// 没有正在进行或即将开始的日程,返回特定错误
|
||||||
|
return nil, respond.NoOngoingOrUpcomingSchedule
|
||||||
|
}
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) RevocateUserTaskClassItem(ctx context.Context, userID, eventID int) error {
|
||||||
|
//1.先查库,看看这个event是任务事件还是课程事件,以及判断它是否属于用户
|
||||||
|
eventType, err := ss.scheduleDAO.GetScheduleTypeByEventID(ctx, eventID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//2.根据查询结果进行不同的撤销操作
|
||||||
|
if eventType == "course" {
|
||||||
|
//下面开启事务,撤销嵌入事件
|
||||||
|
err := ss.repoManager.Transaction(ctx, func(txM *rootdao.RepoManager) error {
|
||||||
|
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||||
|
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||||
|
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, eventID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
//3.2.再将task_items表的embedded_time字段设置为null
|
||||||
|
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
//3.3.最后设置task_items表的status字段为已撤销
|
||||||
|
txErr = txM.Schedule.RevocateSchedulesByEventID(ctx, eventID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else if eventType == "task" {
|
||||||
|
//下面开启事务,撤销任务事件
|
||||||
|
err := ss.repoManager.Transaction(ctx, func(txM *rootdao.RepoManager) error {
|
||||||
|
//先通过rel_id找到对应的task_id
|
||||||
|
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, eventID)
|
||||||
|
if txErr != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
//再将task_items表中对应的embedded_time字段设置为null
|
||||||
|
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||||
|
if txErr != nil {
|
||||||
|
return txErr
|
||||||
|
}
|
||||||
|
//最后将其从日程表中删除(通过级联删除实现)
|
||||||
|
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, eventID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Println("ScheduleService.RevocateUserTaskClassItem: eventType is neither embedded_task nor task, something must be wrong")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ss *ScheduleService) SmartPlanning(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, error) {
|
||||||
|
//1.通过任务类id获取任务类详情
|
||||||
|
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//2.校验任务类的参数是否合法
|
||||||
|
if taskClass == nil {
|
||||||
|
return nil, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
if *taskClass.Mode != "auto" {
|
||||||
|
return nil, respond.TaskClassModeNotAuto
|
||||||
|
}
|
||||||
|
//3.获取任务类安排的时间范围内的全部周数信息(左右边界不足一周的情况也要算作一周)
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//4.将多个周的信息传入智能排课算法,获取推荐的时间安排(周+周内的天+节次)
|
||||||
|
result, err := logic.SmartPlanningMainLogic(schedules, taskClass)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
//5.将推荐的时间安排转换为前端需要的格式返回
|
||||||
|
return result, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SmartPlanningRaw 执行粗排算法并同时返回展示结构和已分配的任务项。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 与 SmartPlanning 共享完全相同的前置校验和粗排逻辑;
|
||||||
|
// 2. 额外返回 allocatedItems(每项的 EmbeddedTime 已由算法回填),
|
||||||
|
// 供 Agent 排程链路直接转换为 BatchApplyPlans 请求,无需再让模型"二次分配"。
|
||||||
|
func (ss *ScheduleService) SmartPlanningRaw(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||||
|
// 1. 获取任务类详情。
|
||||||
|
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if taskClass == nil {
|
||||||
|
return nil, nil, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
if *taskClass.Mode != "auto" {
|
||||||
|
return nil, nil, respond.TaskClassModeNotAuto
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 获取时间范围内的全部日程。
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. 执行粗排算法,拿到已分配的 items(EmbeddedTime 已回填)。
|
||||||
|
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. 同时生成展示结构,供 SSE 阶段推送给前端预览。
|
||||||
|
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||||
|
return displayResult, allocatedItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SmartPlanningMulti 执行“多任务类智能粗排”,仅返回前端展示结构。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 负责把多任务类请求收口到统一粗排流程;
|
||||||
|
// 2. 负责返回展示结构;
|
||||||
|
// 3. 不返回底层分配细节(由 SmartPlanningMultiRaw 提供)。
|
||||||
|
func (ss *ScheduleService) SmartPlanningMulti(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, error) {
|
||||||
|
displayResult, _, err := ss.SmartPlanningMultiRaw(ctx, userID, taskClassIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return displayResult, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SmartPlanningMultiRaw 执行“多任务类智能粗排”,同时返回展示结构和已分配任务项。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 负责多任务类请求的完整前置处理(归一化/校验/排序/时间窗收敛);
|
||||||
|
// 2. 负责调用多任务类粗排主逻辑(共享资源池);
|
||||||
|
// 3. 只计算建议,不负责落库。
|
||||||
|
func (ss *ScheduleService) SmartPlanningMultiRaw(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||||
|
// 1. 输入归一化。
|
||||||
|
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||||
|
if len(normalizedIDs) == 0 {
|
||||||
|
return nil, nil, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 批量读取完整任务类(含 Items)。
|
||||||
|
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. 校验任务类并计算全局时间窗。
|
||||||
|
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. 拉取全局时间窗内的既有日程底板。
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||||
|
ctx,
|
||||||
|
userID,
|
||||||
|
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||||
|
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. 执行多任务类粗排(共享资源池 + 增量占位)。
|
||||||
|
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 6. 转换前端展示结构。
|
||||||
|
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||||
|
return displayResult, allocatedItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ResolvePlanningWindowByTaskClasses 解析“多任务类排程窗口”的相对周/天边界。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 只负责根据 task_class_ids 计算全局起止日期并转换成相对周/天;
|
||||||
|
// 2. 不执行粗排、不查询课表、不生成 HybridEntries;
|
||||||
|
// 3. 供 Agent 周级 Move 工具做硬边界校验,防止越界移动。
|
||||||
|
//
|
||||||
|
// 返回语义:
|
||||||
|
// 1. startWeek/startDay:允许排程的起点(含);
|
||||||
|
// 2. endWeek/endDay:允许排程的终点(含);
|
||||||
|
// 3. error:任何校验或日期转换失败都返回错误。
|
||||||
|
func (ss *ScheduleService) ResolvePlanningWindowByTaskClasses(ctx context.Context, userID int, taskClassIDs []int) (int, int, int, int, error) {
|
||||||
|
// 1. 输入归一化:过滤非法值并去重。
|
||||||
|
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||||
|
if len(normalizedIDs) == 0 {
|
||||||
|
return 0, 0, 0, 0, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 批量查询任务类并复用统一校验逻辑,拿到全局起止日期。
|
||||||
|
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, 0, err
|
||||||
|
}
|
||||||
|
_, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, 0, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. 把绝对日期转换为“相对周/天”。
|
||||||
|
// 3.1 这里统一复用 conv.RealDateToRelativeDate,确保和现有排程口径一致;
|
||||||
|
// 3.2 若日期超出学期配置范围,直接返回错误,避免错误边界进入工具层。
|
||||||
|
startWeek, startDay, err := conv.RealDateToRelativeDate(globalStartDate.Format(conv.DateFormat))
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, 0, err
|
||||||
|
}
|
||||||
|
endWeek, endDay, err := conv.RealDateToRelativeDate(globalEndDate.Format(conv.DateFormat))
|
||||||
|
if err != nil {
|
||||||
|
return 0, 0, 0, 0, err
|
||||||
|
}
|
||||||
|
if endWeek < startWeek || (endWeek == startWeek && endDay < startDay) {
|
||||||
|
return 0, 0, 0, 0, respond.InvalidDateRange
|
||||||
|
}
|
||||||
|
return startWeek, startDay, endWeek, endDay, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalizeTaskClassIDsForMultiPlanning sanitizes task_class_ids: non-positive
// values are dropped, duplicates are removed, and the first-seen order of the
// remaining IDs is preserved. The result is always non-nil.
func normalizeTaskClassIDsForMultiPlanning(ids []int) []int {
	result := make([]int, 0, len(ids))
	visited := make(map[int]struct{}, len(ids))
	for _, candidate := range ids {
		if candidate <= 0 {
			continue
		}
		if _, dup := visited[candidate]; dup {
			continue
		}
		visited[candidate] = struct{}{}
		result = append(result, candidate)
	}
	return result
}
|
||||||
|
|
||||||
|
// prepareTaskClassesForMultiPlanning 把 DAO 结果转成可直接粗排的数据集。
|
||||||
|
//
|
||||||
|
// 职责边界:
|
||||||
|
// 1. 校验每个任务类可参与自动排程;
|
||||||
|
// 2. 计算全局时间窗(最早开始 ~ 最晚结束);
|
||||||
|
// 3. 执行多任务类排序策略。
|
||||||
|
func prepareTaskClassesForMultiPlanning(taskClasses []model.TaskClass, orderedIDs []int) ([]*model.TaskClass, time.Time, time.Time, error) {
|
||||||
|
if len(orderedIDs) == 0 {
|
||||||
|
return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
|
||||||
|
classByID := make(map[int]*model.TaskClass, len(taskClasses))
|
||||||
|
for i := range taskClasses {
|
||||||
|
tc := &taskClasses[i]
|
||||||
|
classByID[tc.ID] = tc
|
||||||
|
}
|
||||||
|
|
||||||
|
ordered := make([]*model.TaskClass, 0, len(orderedIDs))
|
||||||
|
var globalStart time.Time
|
||||||
|
var globalEnd time.Time
|
||||||
|
for idx, id := range orderedIDs {
|
||||||
|
taskClass, exists := classByID[id]
|
||||||
|
if !exists || taskClass == nil {
|
||||||
|
return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
if taskClass.Mode == nil || *taskClass.Mode != "auto" {
|
||||||
|
return nil, time.Time{}, time.Time{}, respond.TaskClassModeNotAuto
|
||||||
|
}
|
||||||
|
if taskClass.StartDate == nil || taskClass.EndDate == nil {
|
||||||
|
return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
|
||||||
|
}
|
||||||
|
start := *taskClass.StartDate
|
||||||
|
end := *taskClass.EndDate
|
||||||
|
if end.Before(start) {
|
||||||
|
return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
|
||||||
|
}
|
||||||
|
if idx == 0 || start.Before(globalStart) {
|
||||||
|
globalStart = start
|
||||||
|
}
|
||||||
|
if idx == 0 || end.After(globalEnd) {
|
||||||
|
globalEnd = end
|
||||||
|
}
|
||||||
|
ordered = append(ordered, taskClass)
|
||||||
|
}
|
||||||
|
|
||||||
|
sortTaskClassesForMultiPlanning(ordered, orderedIDs)
|
||||||
|
return ordered, globalStart, globalEnd, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sortTaskClassesForMultiPlanning 执行稳定排序:
|
||||||
|
// 1. end_date 早优先;
|
||||||
|
// 2. rapid 优先于 steady;
|
||||||
|
// 3. 输入顺序兜底。
|
||||||
|
func sortTaskClassesForMultiPlanning(taskClasses []*model.TaskClass, inputOrder []int) {
|
||||||
|
if len(taskClasses) <= 1 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
orderIndex := make(map[int]int, len(inputOrder))
|
||||||
|
for idx, id := range inputOrder {
|
||||||
|
orderIndex[id] = idx
|
||||||
|
}
|
||||||
|
|
||||||
|
sort.SliceStable(taskClasses, func(i, j int) bool {
|
||||||
|
left := taskClasses[i]
|
||||||
|
right := taskClasses[j]
|
||||||
|
if left == nil || right == nil {
|
||||||
|
return left != nil
|
||||||
|
}
|
||||||
|
if left.EndDate != nil && right.EndDate != nil && !left.EndDate.Equal(*right.EndDate) {
|
||||||
|
return left.EndDate.Before(*right.EndDate)
|
||||||
|
}
|
||||||
|
leftRapid := left.Strategy != nil && *left.Strategy == "rapid"
|
||||||
|
rightRapid := right.Strategy != nil && *right.Strategy == "rapid"
|
||||||
|
if leftRapid != rightRapid {
|
||||||
|
return leftRapid
|
||||||
|
}
|
||||||
|
leftOrder, leftOK := orderIndex[left.ID]
|
||||||
|
rightOrder, rightOK := orderIndex[right.ID]
|
||||||
|
if leftOK && rightOK && leftOrder != rightOrder {
|
||||||
|
return leftOrder < rightOrder
|
||||||
|
}
|
||||||
|
return left.ID < right.ID
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// HybridScheduleWithPlan 构建“单任务类”混合日程(existing + suggested)。
|
||||||
|
func (ss *ScheduleService) HybridScheduleWithPlan(
|
||||||
|
ctx context.Context, userID, taskClassID int,
|
||||||
|
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||||
|
// 1. 校验并读取任务类。
|
||||||
|
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
if taskClass == nil {
|
||||||
|
return nil, nil, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
if taskClass.Mode == nil || *taskClass.Mode != "auto" {
|
||||||
|
return nil, nil, respond.TaskClassModeNotAuto
|
||||||
|
}
|
||||||
|
if taskClass.StartDate == nil || taskClass.EndDate == nil {
|
||||||
|
return nil, nil, respond.InvalidDateRange
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 拉取时间窗内既有日程。
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||||
|
ctx, userID,
|
||||||
|
conv.CalculateFirstDayOfWeek(*taskClass.StartDate),
|
||||||
|
conv.CalculateLastDayOfWeek(*taskClass.EndDate),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. 执行粗排。
|
||||||
|
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. 统一合并。
|
||||||
|
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||||
|
return entries, allocatedItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// HybridScheduleWithPlanMulti 构建“多任务类”混合日程(existing + suggested)。
|
||||||
|
func (ss *ScheduleService) HybridScheduleWithPlanMulti(
|
||||||
|
ctx context.Context,
|
||||||
|
userID int,
|
||||||
|
taskClassIDs []int,
|
||||||
|
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||||
|
// 1. 归一化任务类 ID。
|
||||||
|
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||||
|
if len(normalizedIDs) == 0 {
|
||||||
|
return nil, nil, respond.WrongTaskClassID
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2. 拉取任务类并做校验/排序。
|
||||||
|
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 3. 拉取全局时间窗内既有日程。
|
||||||
|
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||||
|
ctx,
|
||||||
|
userID,
|
||||||
|
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||||
|
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 4. 多任务类粗排。
|
||||||
|
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// 5. 统一合并。
|
||||||
|
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||||
|
return entries, allocatedItems, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// buildHybridEntriesFromSchedulesAndAllocated merges existing and suggested
// schedule entries into one list.
//
// Notes:
//  1. Existing schedules are grouped by (event, day, embeddable flag, blocking
//     flag) and then split into runs of consecutive sections;
//  2. Suggested entries are generated directly from allocatedItems;
//  3. Pure in-memory assembly — no database access.
func buildHybridEntriesFromSchedulesAndAllocated(
	schedules []model.Schedule,
	allocatedItems []model.TaskClassItem,
) []model.HybridScheduleEntry {
	// Capacity heuristic: existing rows collapse into runs (~2 sections each),
	// plus one entry per allocated item.
	entries := make([]model.HybridScheduleEntry, 0, len(schedules)/2+len(allocatedItems))

	// eventGroupKey identifies one mergeable bucket of atomic schedule rows.
	type eventGroupKey struct {
		EventID           int
		Week              int
		DayOfWeek         int
		CanBeEmbedded     bool
		BlockForSuggested bool
	}
	// eventGroup accumulates the sections belonging to one bucket.
	type eventGroup struct {
		Key      eventGroupKey
		Name     string
		Type     string
		Sections []int
	}
	groupMap := make(map[eventGroupKey]*eventGroup)

	// 1. Process existing schedules first.
	for _, s := range schedules {
		// Fallbacks used when the joined Event row is missing.
		name := "未知"
		typ := "course"
		canBeEmbedded := false
		if s.Event != nil {
			name = s.Event.Name
			typ = s.Event.Type
			canBeEmbedded = s.Event.CanBeEmbedded
		}

		// Blocking semantics:
		//  1. tasks block by default;
		//  2. non-embeddable courses block;
		//  3. embeddable courses do NOT block while the atomic slot is not
		//     already occupied by an embedded_task.
		blockForSuggested := true
		if typ == "course" && canBeEmbedded && s.EmbeddedTaskID == nil {
			blockForSuggested = false
		}

		key := eventGroupKey{
			EventID:           s.EventID,
			Week:              s.Week,
			DayOfWeek:         s.DayOfWeek,
			CanBeEmbedded:     canBeEmbedded,
			BlockForSuggested: blockForSuggested,
		}
		group, ok := groupMap[key]
		if !ok {
			group = &eventGroup{
				Key:  key,
				Name: name,
				Type: typ,
			}
			groupMap[key] = group
		}
		group.Sections = append(group.Sections, s.Section)
	}

	// Emit one entry per run of consecutive sections within each group.
	for _, group := range groupMap {
		if len(group.Sections) == 0 {
			continue
		}
		sort.Ints(group.Sections)

		runStart := group.Sections[0]
		prev := group.Sections[0]
		// flushRun appends the entry covering sections [from, to].
		flushRun := func(from, to int) {
			entries = append(entries, model.HybridScheduleEntry{
				Week:              group.Key.Week,
				DayOfWeek:         group.Key.DayOfWeek,
				SectionFrom:       from,
				SectionTo:         to,
				Name:              group.Name,
				Type:              group.Type,
				Status:            "existing",
				EventID:           group.Key.EventID,
				CanBeEmbedded:     group.Key.CanBeEmbedded,
				BlockForSuggested: group.Key.BlockForSuggested,
			})
		}
		for i := 1; i < len(group.Sections); i++ {
			cur := group.Sections[i]
			if cur == prev+1 {
				prev = cur
				continue
			}
			// Gap found: close the current run and start a new one.
			flushRun(runStart, prev)
			runStart = cur
			prev = cur
		}
		// Close the trailing run.
		flushRun(runStart, prev)
	}

	// 2. Then process suggested items; items without an embedded time have no
	// placement and are skipped.
	for _, item := range allocatedItems {
		if item.EmbeddedTime == nil {
			continue
		}
		name := "未命名任务"
		if item.Content != nil && strings.TrimSpace(*item.Content) != "" {
			name = strings.TrimSpace(*item.Content)
		}
		entries = append(entries, model.HybridScheduleEntry{
			Week:              item.EmbeddedTime.Week,
			DayOfWeek:         item.EmbeddedTime.DayOfWeek,
			SectionFrom:       item.EmbeddedTime.SectionFrom,
			SectionTo:         item.EmbeddedTime.SectionTo,
			Name:              name,
			Type:              "task",
			Status:            "suggested",
			TaskItemID:        item.ID,
			TaskClassID:       derefInt(item.CategoryID),
			BlockForSuggested: true,
		})
	}

	return entries
}
|
||||||
|
|
||||||
|
// derefInt returns the value pointed to by p, or 0 when p is nil.
func derefInt(p *int) int {
	if p != nil {
		return *p
	}
	return 0
}
|
||||||
151
backend/shared/contracts/schedule/types.go
Normal file
151
backend/shared/contracts/schedule/types.go
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
package schedule

import "time"

// Target types recognized by schedule requests (window facts, feedback and
// apply changes) when addressing an entity across service boundaries.
const (
	TargetTypeTaskPool      = "task_pool"
	TargetTypeTaskItem      = "task_item"
	TargetTypeScheduleEvent = "schedule_event"
)

// UserRequest is the generic request for schedule reads scoped to one user.
//
// Responsibilities:
//  1. Carries only the user_id obtained after authentication;
//  2. Carries no token, role or HTTP parameters;
//  3. Business validation happens inside the schedule service.
type UserRequest struct {
	UserID int `json:"user_id"`
}

// WeekRequest scopes a schedule read to one user and one relative week.
type WeekRequest struct {
	UserID int `json:"user_id"`
	Week   int `json:"week"`
}

// DeleteScheduleEventsRequest batches schedule-event deletions for one user.
type DeleteScheduleEventsRequest struct {
	UserID int                       `json:"user_id"`
	Events []UserDeleteScheduleEvent `json:"events"`
}

// UserDeleteScheduleEvent describes one event to delete plus cascade options.
type UserDeleteScheduleEvent struct {
	ID                 int  `json:"id"`
	DeleteCourse       bool `json:"delete_course"`
	DeleteEmbeddedTask bool `json:"delete_embedded_task"`
}

// RecentCompletedRequest pages through a user's recently completed schedules.
type RecentCompletedRequest struct {
	UserID int `json:"user_id"`
	Index  int `json:"index"`
	Limit  int `json:"limit"`
}

// RevokeTaskItemRequest asks to revoke a task item from a schedule event.
type RevokeTaskItemRequest struct {
	UserID  int `json:"user_id"`
	EventID int `json:"event_id"`
}

// SmartPlanningRequest triggers rough planning for a single task class.
type SmartPlanningRequest struct {
	UserID      int `json:"user_id"`
	TaskClassID int `json:"task_class_id"`
}

// SmartPlanningMultiRequest triggers rough planning across several task classes.
type SmartPlanningMultiRequest struct {
	UserID       int   `json:"user_id"`
	TaskClassIDs []int `json:"task_class_ids"`
}

// Slot is the stable cross-process contract for one atomic schedule section.
type Slot struct {
	Week      int `json:"week"`
	DayOfWeek int `json:"day_of_week"`
	Section   int `json:"section"`
	// NOTE(review): `omitempty` has no effect on non-pointer time.Time values
	// (encoding/json only omits empty basic/slice/map types), so zero times are
	// still serialized — confirm whether *time.Time or Go 1.24 `omitzero` was
	// intended.
	StartAt time.Time `json:"start_at,omitempty"`
	EndAt   time.Time `json:"end_at,omitempty"`
}

// ScheduleEventFact is the read-model snapshot of one schedule event exposed to
// other services (e.g. active-scheduler).
type ScheduleEventFact struct {
	ID             int    `json:"id"`
	UserID         int    `json:"user_id"`
	Title          string `json:"title"`
	SourceType     string `json:"source_type"`
	RelID          int    `json:"rel_id"`
	IsDynamicTask  bool   `json:"is_dynamic_task"`
	IsCompleted    bool   `json:"is_completed"`
	Slots          []Slot `json:"slots"`
	TaskClassID    int    `json:"task_class_id"`
	TaskItemID     int    `json:"task_item_id"`
	CanBeShortened bool   `json:"can_be_shortened"`
}

// ScheduleWindowFacts aggregates the facts inside one planning window: events,
// occupied/free slots, the next dynamic task and whether the target is already
// scheduled.
type ScheduleWindowFacts struct {
	Events                 []ScheduleEventFact `json:"events"`
	OccupiedSlots          []Slot              `json:"occupied_slots"`
	FreeSlots              []Slot              `json:"free_slots"`
	NextDynamicTask        *ScheduleEventFact  `json:"next_dynamic_task,omitempty"`
	TargetAlreadyScheduled bool                `json:"target_already_scheduled"`
}

// ScheduleWindowRequest asks for window facts about one target (see the
// TargetType* constants) within [WindowStart, WindowEnd] at reference time Now.
type ScheduleWindowRequest struct {
	UserID      int       `json:"user_id"`
	TargetType  string    `json:"target_type"`
	TargetID    int       `json:"target_id"`
	WindowStart time.Time `json:"window_start"`
	WindowEnd   time.Time `json:"window_end"`
	Now         time.Time `json:"now"`
}

// FeedbackRequest looks up a feedback record; IdempotencyKey guards against
// duplicate processing.
type FeedbackRequest struct {
	UserID         int    `json:"user_id"`
	FeedbackID     string `json:"feedback_id"`
	IdempotencyKey string `json:"idempotency_key"`
	TargetType     string `json:"target_type"`
	TargetID       int    `json:"target_id"`
}

// FeedbackFact is the resolved view of one feedback record, including whether
// its target could be identified.
type FeedbackFact struct {
	FeedbackID       string    `json:"feedback_id"`
	Text             string    `json:"text"`
	TargetKnown      bool      `json:"target_known"`
	TargetEventID    int       `json:"target_event_id"`
	TargetTaskItemID int       `json:"target_task_item_id"`
	TargetTitle      string    `json:"target_title"`
	SubmittedAt      time.Time `json:"submitted_at"`
}

// FeedbackResponse wraps a FeedbackFact; Found distinguishes "no such record"
// from an empty record.
type FeedbackResponse struct {
	Feedback FeedbackFact `json:"feedback"`
	Found    bool         `json:"found"`
}

// ApplyActiveScheduleRequest applies a confirmed active-schedule preview.
// PreviewID/ApplyID tie the apply back to its preview for idempotency and
// tracing.
type ApplyActiveScheduleRequest struct {
	PreviewID   string        `json:"preview_id"`
	ApplyID     string        `json:"apply_id"`
	UserID      int           `json:"user_id"`
	CandidateID string        `json:"candidate_id"`
	Changes     []ApplyChange `json:"changes"`
	RequestedAt time.Time     `json:"requested_at"`
	TraceID     string        `json:"trace_id"`
}

// ApplyChange is one change inside an apply request, addressed by change type
// and target (see the TargetType* constants).
type ApplyChange struct {
	ChangeID         string            `json:"change_id"`
	ChangeType       string            `json:"change_type"`
	TargetType       string            `json:"target_type"`
	TargetID         int               `json:"target_id"`
	ToSlot           *SlotSpan         `json:"to_slot,omitempty"`
	DurationSections int               `json:"duration_sections"`
	Metadata         map[string]string `json:"metadata,omitempty"`
}

// SlotSpan is an inclusive span of atomic slots with its section count.
type SlotSpan struct {
	Start            Slot `json:"start"`
	End              Slot `json:"end"`
	DurationSections int  `json:"duration_sections"`
}

// ApplyActiveScheduleResult reports the IDs created or touched by an apply.
type ApplyActiveScheduleResult struct {
	ApplyID            string `json:"apply_id"`
	AppliedEventIDs    []int  `json:"applied_event_ids,omitempty"`
	AppliedScheduleIDs []int  `json:"applied_schedule_ids,omitempty"`
}
|
||||||
25
backend/shared/ports/schedule.go
Normal file
25
backend/shared/ports/schedule.go
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
package ports

import (
	"context"
	"encoding/json"

	schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
)

// ScheduleCommandClient is the minimal capability set the gateway needs to
// call the schedule service.
//
// Responsibilities:
//  1. Covers only what the current /api/v1/schedule HTTP facade needs;
//  2. Exposes no schedule DAO, transaction orchestration, rough-planning
//     algorithm or apply state machine;
//  3. Complex responses are passed through as raw JSON so the gateway does not
//     copy schedule-internal DTOs.
type ScheduleCommandClient interface {
	// GetUserTodaySchedule returns today's schedule for the user as raw JSON.
	GetUserTodaySchedule(ctx context.Context, userID int) (json.RawMessage, error)
	// GetUserWeeklySchedule returns the given relative week's schedule as raw JSON.
	GetUserWeeklySchedule(ctx context.Context, userID int, week int) (json.RawMessage, error)
	// DeleteScheduleEvent deletes the events named in req, honoring its
	// per-event cascade flags.
	DeleteScheduleEvent(ctx context.Context, req schedulecontracts.DeleteScheduleEventsRequest) error
	// GetUserRecentCompletedSchedules pages the user's recently completed
	// schedules (req carries index/limit) as raw JSON.
	GetUserRecentCompletedSchedules(ctx context.Context, req schedulecontracts.RecentCompletedRequest) (json.RawMessage, error)
	// GetUserOngoingSchedule returns the user's ongoing schedule as raw JSON.
	GetUserOngoingSchedule(ctx context.Context, userID int) (json.RawMessage, error)
	// RevokeTaskItemFromSchedule revokes a task item from the schedule event
	// identified in req.
	RevokeTaskItemFromSchedule(ctx context.Context, req schedulecontracts.RevokeTaskItemRequest) error
	// SmartPlanning runs single-task-class rough planning; the result is raw JSON.
	SmartPlanning(ctx context.Context, req schedulecontracts.SmartPlanningRequest) (json.RawMessage, error)
	// SmartPlanningMulti runs multi-task-class rough planning; the result is raw JSON.
	SmartPlanningMulti(ctx context.Context, req schedulecontracts.SmartPlanningMultiRequest) (json.RawMessage, error)
}
|
||||||
@@ -430,6 +430,15 @@ flowchart LR
|
|||||||
|
|
||||||
### 4.9 阶段 5:再拆 schedule / task / course / task-class
|
### 4.9 阶段 5:再拆 schedule / task / course / task-class
|
||||||
|
|
||||||
|
当前进展(2026-05-04 首刀):
|
||||||
|
|
||||||
|
1. `schedule` 已开始服务化:新增 `cmd/schedule`、`services/schedule/{dao,rpc,sv,core}`、`gateway/client/schedule`、`shared/contracts/schedule` 和 `shared/ports` schedule port。
|
||||||
|
2. gateway 的 `/api/v1/schedule/*` HTTP 门面已切到 schedule zrpc client;gateway 不再通过 `backend/service.ScheduleService` 直接承载 schedule HTTP 入口业务。
|
||||||
|
3. active-scheduler 的 schedule facts / feedback / confirm apply 已改为调用 schedule RPC adapter;`cmd/active-scheduler` 启动依赖检查已移除 `schedule_events`、`schedules`、`task_classes`、`task_items`,迁移期仍直接读取 `tasks`。
|
||||||
|
4. gateway schedule client 和 active-scheduler schedule RPC adapter 已接入 `Ping` 启动期健康检查;单体聊天主动调度 rerun 的 schedule facts / feedback / apply 也已切到 schedule RPC,task facts 暂时仍走 Gorm。
|
||||||
|
5. 旧实现仍保留:`backend/service/schedule.go`、`backend/dao/schedule.go`、active-scheduler 旧 Gorm apply adapter 暂时保留,用于 agent 迁移期、单体残留路径和回退。
|
||||||
|
6. 当前切流点:HTTP schedule 流量进入 `cmd/schedule`;active-scheduler 正式写日程进入 schedule 服务;course / task-class / agent 内部仍存在直接 DAO 调用,后续按域继续切。
|
||||||
|
|
||||||
目标:
|
目标:
|
||||||
|
|
||||||
1. 把正式日程和任务池的所有权拆出去。
|
1. 把正式日程和任务池的所有权拆出去。
|
||||||
|
|||||||
Reference in New Issue
Block a user