Version: 0.9.77.dev.260505
后端:
1.阶段 6 CP4/CP5 目录收口与共享边界纯化
- 将 backend 根目录收口为 services、client、gateway、cmd、shared 五个一级目录
- 收拢 bootstrap、inits、infra/kafka、infra/outbox、conv、respond、pkg、middleware,移除根目录旧实现与空目录
- 将 utils 下沉到 services/userauth/internal/auth,将 logic 下沉到 services/schedule/core/planning
- 将迁移期 runtime 桥接实现统一收拢到 services/runtime/{conv,dao,eventsvc,model},删除 shared/legacy 与未再被 import 的旧 service 实现
- 将 gateway/shared/respond 收口为 HTTP/Gin 错误写回适配,shared/respond 仅保留共享错误语义与状态映射
- 将 HTTP IdempotencyMiddleware 与 RateLimitMiddleware 收口到 gateway/middleware
- 将 GormCachePlugin 下沉到 shared/infra/gormcache,将共享 RateLimiter 下沉到 shared/infra/ratelimit,将 agent token budget 下沉到 services/agent/shared
- 删除 InitEino 兼容壳,收缩 cmd/internal/coreinit 仅保留旧组合壳残留域初始化语义
- 更新微服务迁移计划与桌面 checklist,补齐 CP4/CP5 当前切流点、目录终态与验证结果
- 完成 go test ./...、git diff --check 与最终真实 smoke;health、register/login、task/create+get、schedule/today、task-class/list、memory/items、agent chat/meta/timeline/context-stats 全部 200,SSE 合并结果为 CP5_OK 且 [DONE] 只有 1 个
This commit is contained in:
@@ -5,8 +5,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -7,14 +7,14 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
||||
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -31,7 +31,7 @@ func main() {
|
||||
log.Fatalf("failed to connect active-scheduler database: %v", err)
|
||||
}
|
||||
|
||||
aiHub, err := inits.InitEino()
|
||||
aiHub, err := einoinfra.InitEino()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize active-scheduler Eino runtime: %v", err)
|
||||
}
|
||||
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
@@ -18,6 +16,8 @@ import (
|
||||
activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
func buildActiveSchedulePreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
agentrpc "github.com/LoveLosita/smartflow/backend/services/agent/rpc"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -8,19 +8,11 @@ import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
|
||||
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
rootsvc "github.com/LoveLosita/smartflow/backend/service"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
memoryclient "github.com/LoveLosita/smartflow/backend/client/memory"
|
||||
scheduleclient "github.com/LoveLosita/smartflow/backend/client/schedule"
|
||||
taskclient "github.com/LoveLosita/smartflow/backend/client/task"
|
||||
taskclassclient "github.com/LoveLosita/smartflow/backend/client/taskclass"
|
||||
userauthclient "github.com/LoveLosita/smartflow/backend/client/userauth"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
@@ -35,6 +27,19 @@ import (
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
mysqlinfra "github.com/LoveLosita/smartflow/backend/shared/infra/mysql"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
@@ -48,7 +53,7 @@ type agentRuntime struct {
|
||||
repoManager *rootdao.RepoManager
|
||||
agentRepo *rootdao.AgentDAO
|
||||
cacheRepo *rootdao.CacheDAO
|
||||
userAuthClient *gatewayuserauth.Client
|
||||
userAuthClient *userauthclient.Client
|
||||
service *agentsv.AgentService
|
||||
workersStarted bool
|
||||
}
|
||||
@@ -59,7 +64,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
return nil, fmt.Errorf("connect agent database failed: %w", err)
|
||||
}
|
||||
|
||||
redisClient, err := inits.OpenRedisFromConfig()
|
||||
redisClient, err := redisinfra.OpenRedisFromConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("connect agent redis failed: %w", err)
|
||||
}
|
||||
@@ -69,7 +74,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
}
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err = db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err = db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
return fail(fmt.Errorf("initialize agent cache deleter failed: %w", err))
|
||||
}
|
||||
|
||||
@@ -94,8 +99,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
manager := rootdao.NewManager(db)
|
||||
agentRepo := rootdao.NewAgentDAO(db)
|
||||
taskRepo := rootdao.NewTaskDAO(db)
|
||||
taskServiceRepo := taskdao.NewTaskDAO(db)
|
||||
taskClassRepo := rootdao.NewTaskClassDAO(db)
|
||||
scheduleRepo := rootdao.NewScheduleDAO(db)
|
||||
scheduleServiceRepo := scheduledao.NewScheduleDAO(db)
|
||||
agentCacheRepo := rootdao.NewAgentCache(redisClient)
|
||||
outboxRepo := outboxinfra.NewRepository(db)
|
||||
|
||||
@@ -110,9 +116,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
eventPublisher := buildAgentOutboxPublisher(outboxRepo)
|
||||
taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
|
||||
|
||||
var userAuthClient *gatewayuserauth.Client
|
||||
var userAuthClient *userauthclient.Client
|
||||
if eventBus != nil {
|
||||
userAuthClient, err = gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
|
||||
userAuthClient, err = userauthclient.NewClient(userauthclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
|
||||
Target: viper.GetString("userauth.rpc.target"),
|
||||
Timeout: viper.GetDuration("userauth.rpc.timeout"),
|
||||
@@ -122,7 +128,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
}
|
||||
}
|
||||
|
||||
taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
|
||||
taskClient, err := taskclient.NewClient(taskclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
|
||||
Target: viper.GetString("task.rpc.target"),
|
||||
Timeout: viper.GetDuration("task.rpc.timeout"),
|
||||
@@ -130,7 +136,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize task zrpc client failed: %w", err))
|
||||
}
|
||||
taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
|
||||
taskClassClient, err := taskclassclient.NewClient(taskclassclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
|
||||
Target: viper.GetString("taskClass.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskClass.rpc.timeout"),
|
||||
@@ -138,7 +144,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize task-class zrpc client failed: %w", err))
|
||||
}
|
||||
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||
scheduleClient, err := scheduleclient.NewClient(scheduleclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
@@ -146,7 +152,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize schedule zrpc client failed: %w", err))
|
||||
}
|
||||
memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
|
||||
memoryClient, err := memoryclient.NewClient(memoryclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
|
||||
Target: viper.GetString("memory.rpc.target"),
|
||||
Timeout: viper.GetDuration("memory.rpc.timeout"),
|
||||
@@ -155,9 +161,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
return fail(fmt.Errorf("initialize memory zrpc client failed: %w", err))
|
||||
}
|
||||
|
||||
taskService := rootsvc.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskService := tasksv.NewTaskService(taskServiceRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskService.SetActiveScheduleDAO(manager.ActiveSchedule)
|
||||
scheduleService := rootsvc.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
|
||||
scheduleService := schedulesv.NewScheduleService(scheduleServiceRepo, taskClassRepo, manager, cacheRepo)
|
||||
agentService := agentsv.NewAgentService(
|
||||
llmService,
|
||||
agentRepo,
|
||||
@@ -286,7 +292,7 @@ func (r *agentRuntime) close() {
|
||||
}
|
||||
|
||||
func openAgentDBFromConfig() (*gorm.DB, error) {
|
||||
db, err := inits.OpenDBFromConfig()
|
||||
db, err := mysqlinfra.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -364,7 +370,7 @@ func ensureAgentRuntimeDependencyTables(db *gorm.DB) error {
|
||||
}
|
||||
|
||||
func buildAgentLLMService() (*llmservice.Service, error) {
|
||||
aiHub, err := inits.InitEino()
|
||||
aiHub, err := einoinfra.InitEino()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
coursedao "github.com/LoveLosita/smartflow/backend/services/course/dao"
|
||||
courserpc "github.com/LoveLosita/smartflow/backend/services/course/rpc"
|
||||
coursesv "github.com/LoveLosita/smartflow/backend/services/course/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
package inits
|
||||
package coreinit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/spf13/viper"
|
||||
"gorm.io/driver/mysql"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
mysqlinfra "github.com/LoveLosita/smartflow/backend/shared/infra/mysql"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -102,22 +101,7 @@ func backfillAutoMigrateData(db *gorm.DB) error {
|
||||
// 2. 不负责选择要迁移哪些模型,迁移入口必须由具体服务显式调用;
|
||||
// 3. 调用方负责决定这是单体残留域、user/auth 还是后续新服务的连接。
|
||||
func OpenDBFromConfig() (*gorm.DB, error) {
|
||||
host := viper.GetString("database.host")
|
||||
port := viper.GetString("database.port")
|
||||
user := viper.GetString("database.user")
|
||||
password := viper.GetString("database.password")
|
||||
dbname := viper.GetString("database.dbname")
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
user, password, host, port, dbname,
|
||||
)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
return mysqlinfra.OpenDBFromConfig()
|
||||
}
|
||||
|
||||
// AutoMigrateCoreStorage 执行当前单体残留域拥有的 schema 初始化。
|
||||
@@ -1,11 +1,10 @@
|
||||
package inits
|
||||
package coreinit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// OpenRedisFromConfig 只创建 Redis client 并做连通性校验。
|
||||
@@ -15,15 +14,7 @@ import (
|
||||
// 2. 不承载 user/auth 黑名单、token 额度等业务语义,那些语义已经收进 userauth 服务;
|
||||
// 3. 返回 error 给服务入口统一处理,避免基础设施包直接 log.Fatal 终止进程。
|
||||
func OpenRedisFromConfig() (*redis.Client, error) {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: viper.GetString("redis.host") + ":" + viper.GetString("redis.port"),
|
||||
Password: viper.GetString("redis.password"),
|
||||
DB: 0,
|
||||
})
|
||||
if _, err := rdb.Ping(context.Background()).Result(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rdb, nil
|
||||
return redisinfra.OpenRedisFromConfig()
|
||||
}
|
||||
|
||||
// InitCoreRedis 初始化当前单体残留域使用的 Redis 连接。
|
||||
@@ -8,10 +8,6 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
|
||||
memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
|
||||
@@ -20,6 +16,10 @@ import (
|
||||
memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -99,7 +99,7 @@ func main() {
|
||||
// 2. 当前启动入口与 cmd/start.go / cmd/active-scheduler 都需要 Eino 初始化,后续若出现第三处重复装配,应抽公共 bootstrap;
|
||||
// 3. 返回 ProClient 是因为现有 memory.Module 只需要 llmservice.Client,不需要完整 Service。
|
||||
func buildMemoryLLMClient() (*llmservice.Client, error) {
|
||||
aiHub, err := inits.InitEino()
|
||||
aiHub, err := einoinfra.InitEino()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
notificationdao "github.com/LoveLosita/smartflow/backend/services/notification/dao"
|
||||
notificationrpc "github.com/LoveLosita/smartflow/backend/services/notification/rpc"
|
||||
notificationsv "github.com/LoveLosita/smartflow/backend/services/notification/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulerpc "github.com/LoveLosita/smartflow/backend/services/schedule/rpc"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -36,7 +36,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize schedule cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,27 +11,18 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
activeschedulerclient "github.com/LoveLosita/smartflow/backend/client/activescheduler"
|
||||
agentclient "github.com/LoveLosita/smartflow/backend/client/agent"
|
||||
courseclient "github.com/LoveLosita/smartflow/backend/client/course"
|
||||
memoryclient "github.com/LoveLosita/smartflow/backend/client/memory"
|
||||
notificationclient "github.com/LoveLosita/smartflow/backend/client/notification"
|
||||
scheduleclient "github.com/LoveLosita/smartflow/backend/client/schedule"
|
||||
taskclient "github.com/LoveLosita/smartflow/backend/client/task"
|
||||
taskclassclient "github.com/LoveLosita/smartflow/backend/client/taskclass"
|
||||
userauthclient "github.com/LoveLosita/smartflow/backend/client/userauth"
|
||||
coreinit "github.com/LoveLosita/smartflow/backend/cmd/internal/coreinit"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
|
||||
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
|
||||
gatewaycourse "github.com/LoveLosita/smartflow/backend/gateway/client/course"
|
||||
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
|
||||
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
|
||||
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
gatewayrouter "github.com/LoveLosita/smartflow/backend/gateway/router"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
"github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
"github.com/LoveLosita/smartflow/backend/service"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
@@ -44,12 +35,27 @@ import (
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
|
||||
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
|
||||
coursedao "github.com/LoveLosita/smartflow/backend/services/course/dao"
|
||||
coursesv "github.com/LoveLosita/smartflow/backend/services/course/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
"github.com/LoveLosita/smartflow/backend/services/memory"
|
||||
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
@@ -75,9 +81,9 @@ type appRuntime struct {
|
||||
agentCache *dao.AgentCache
|
||||
manager *dao.RepoManager
|
||||
outboxRepo *outboxinfra.Repository
|
||||
limiter *pkg.RateLimiter
|
||||
limiter *ratelimit.RateLimiter
|
||||
handlers *api.ApiHandlers
|
||||
userAuthClient *gatewayuserauth.Client
|
||||
userAuthClient *userauthclient.Client
|
||||
}
|
||||
|
||||
// loadConfig 锻炼?
|
||||
@@ -154,23 +160,23 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := inits.ConnectCoreDB()
|
||||
db, err := coreinit.ConnectCoreDB()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
|
||||
rdb, err := inits.InitCoreRedis()
|
||||
rdb, err := coreinit.InitCoreRedis()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to redis: %w", err)
|
||||
}
|
||||
limiter := pkg.NewRateLimiter(rdb)
|
||||
limiter := ratelimit.NewRateLimiter(rdb)
|
||||
|
||||
// DAO 层初始化。
|
||||
cacheRepo := dao.NewCacheDAO(rdb)
|
||||
_ = db.Use(middleware.NewGormCachePlugin(cacheRepo))
|
||||
_ = db.Use(gormcache.NewGormCachePlugin(cacheRepo))
|
||||
|
||||
// Service 层初始化。
|
||||
userAuthClient, err := gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
|
||||
userAuthClient, err := userauthclient.NewClient(userauthclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
|
||||
Target: viper.GetString("userauth.rpc.target"),
|
||||
Timeout: viper.GetDuration("userauth.rpc.timeout"),
|
||||
@@ -178,7 +184,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize userauth zrpc client: %w", err)
|
||||
}
|
||||
notificationClient, err := gatewaynotification.NewClient(gatewaynotification.ClientConfig{
|
||||
notificationClient, err := notificationclient.NewClient(notificationclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("notification.rpc.endpoints"),
|
||||
Target: viper.GetString("notification.rpc.target"),
|
||||
Timeout: viper.GetDuration("notification.rpc.timeout"),
|
||||
@@ -186,7 +192,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize notification zrpc client: %w", err)
|
||||
}
|
||||
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||
scheduleClient, err := scheduleclient.NewClient(scheduleclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
@@ -194,7 +200,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize schedule zrpc client: %w", err)
|
||||
}
|
||||
taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
|
||||
taskClient, err := taskclient.NewClient(taskclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
|
||||
Target: viper.GetString("task.rpc.target"),
|
||||
Timeout: viper.GetDuration("task.rpc.timeout"),
|
||||
@@ -202,7 +208,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize task zrpc client: %w", err)
|
||||
}
|
||||
taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
|
||||
taskClassClient, err := taskclassclient.NewClient(taskclassclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
|
||||
Target: viper.GetString("taskClass.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskClass.rpc.timeout"),
|
||||
@@ -210,7 +216,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize task-class zrpc client: %w", err)
|
||||
}
|
||||
courseClient, err := gatewaycourse.NewClient(gatewaycourse.ClientConfig{
|
||||
courseClient, err := courseclient.NewClient(courseclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("course.rpc.endpoints"),
|
||||
Target: viper.GetString("course.rpc.target"),
|
||||
Timeout: viper.GetDuration("course.rpc.timeout"),
|
||||
@@ -219,7 +225,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize course zrpc client: %w", err)
|
||||
}
|
||||
memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
|
||||
memoryClient, err := memoryclient.NewClient(memoryclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
|
||||
Target: viper.GetString("memory.rpc.target"),
|
||||
Timeout: viper.GetDuration("memory.rpc.timeout"),
|
||||
@@ -227,7 +233,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize memory zrpc client: %w", err)
|
||||
}
|
||||
agentRPCClient, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
|
||||
agentRPCClient, err := agentclient.NewClient(agentclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
|
||||
Target: viper.GetString("agent.rpc.target"),
|
||||
Timeout: viper.GetDuration("agent.rpc.timeout"),
|
||||
@@ -235,7 +241,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize agent zrpc client: %w", err)
|
||||
}
|
||||
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
|
||||
activeSchedulerClient, err := activeschedulerclient.NewClient(activeschedulerclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
|
||||
Target: viper.GetString("activeScheduler.rpc.target"),
|
||||
Timeout: viper.GetDuration("activeScheduler.rpc.timeout"),
|
||||
@@ -251,7 +257,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if shouldBuildGatewayAgentFallback() {
|
||||
log.Println("Gateway agent RPC fallback is enabled; building local AgentService compatibility path")
|
||||
|
||||
aiHub, err := inits.InitEino()
|
||||
aiHub, err := einoinfra.InitEino()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize Eino: %w", err)
|
||||
}
|
||||
@@ -273,8 +279,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
|
||||
agentCacheRepo = dao.NewAgentCache(rdb)
|
||||
taskRepo := dao.NewTaskDAO(db)
|
||||
taskServiceRepo := taskdao.NewTaskDAO(db)
|
||||
taskClassRepo := dao.NewTaskClassDAO(db)
|
||||
scheduleRepo := dao.NewScheduleDAO(db)
|
||||
scheduleServiceRepo := scheduledao.NewScheduleDAO(db)
|
||||
manager = dao.NewManager(db)
|
||||
agentRepo = dao.NewAgentDAO(db)
|
||||
outboxRepo = outboxinfra.NewRepository(db)
|
||||
@@ -286,9 +293,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
return nil, fmt.Errorf("failed to register task outbox route: %w", err)
|
||||
}
|
||||
taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
|
||||
taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskSv := tasksv.NewTaskService(taskServiceRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
|
||||
scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
|
||||
scheduleService := schedulesv.NewScheduleService(scheduleServiceRepo, taskClassRepo, manager, cacheRepo)
|
||||
agentService = agentsv.NewAgentService(
|
||||
llmService,
|
||||
agentRepo,
|
||||
@@ -488,13 +495,13 @@ func (p *repositoryOutboxPublisher) Publish(ctx context.Context, req outboxinfra
|
||||
return err
|
||||
}
|
||||
|
||||
func buildCourseService(llmService *llmservice.Service, courseRepo *dao.CourseDAO, scheduleRepo *dao.ScheduleDAO) *service.CourseService {
|
||||
func buildCourseService(llmService *llmservice.Service, courseRepo *coursedao.CourseDAO, scheduleRepo *dao.ScheduleDAO) *coursesv.CourseService {
|
||||
courseImageResponsesClient := llmService.CourseImageResponsesClient()
|
||||
return service.NewCourseService(
|
||||
return coursesv.NewCourseService(
|
||||
courseRepo,
|
||||
scheduleRepo,
|
||||
courseImageResponsesClient,
|
||||
service.NewCourseImageParseConfig(
|
||||
coursesv.NewCourseImageParseConfig(
|
||||
viper.GetInt64("courseImport.maxImageBytes"),
|
||||
viper.GetInt("courseImport.maxTokens"),
|
||||
),
|
||||
@@ -827,7 +834,7 @@ func buildAPIHandlers(
|
||||
courseClient ports.CourseCommandClient,
|
||||
scheduleClient ports.ScheduleCommandClient,
|
||||
agentService *agentsv.AgentService,
|
||||
agentRPCClient *gatewayagent.Client,
|
||||
agentRPCClient *agentclient.Client,
|
||||
memoryClient ports.MemoryCommandClient,
|
||||
activeSchedulerClient ports.ActiveSchedulerCommandClient,
|
||||
notificationClient ports.NotificationCommandClient,
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
taskclassdao "github.com/LoveLosita/smartflow/backend/services/task_class/dao"
|
||||
taskclassrpc "github.com/LoveLosita/smartflow/backend/services/task_class/rpc"
|
||||
taskclasssv "github.com/LoveLosita/smartflow/backend/services/task_class/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -35,7 +35,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize task-class cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,14 +7,14 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
taskrpc "github.com/LoveLosita/smartflow/backend/services/task/rpc"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -37,7 +37,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize task cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -3,10 +3,10 @@ package main
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
userauthdao "github.com/LoveLosita/smartflow/backend/services/userauth/dao"
|
||||
userauthrpc "github.com/LoveLosita/smartflow/backend/services/userauth/rpc"
|
||||
userauthsv "github.com/LoveLosita/smartflow/backend/services/userauth/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentclient "github.com/LoveLosita/smartflow/backend/client/agent"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
|
||||
type AgentHandler struct {
|
||||
svc *agentsv.AgentService
|
||||
rpcClient *gatewayagent.Client
|
||||
rpcClient *agentclient.Client
|
||||
rpcClientMu sync.Mutex
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func NewAgentHandler(svc *agentsv.AgentService) *AgentHandler {
|
||||
// 2. agent RPC 作为 chat stream 与非 chat /agent/* 查询/命令的服务间通道;
|
||||
// 3. svc 只用于 RPC 开关关闭时的迁移期 fallback,当前默认可为 nil;
|
||||
// 4. rpcClient 为空时允许按配置懒加载,避免测试和旧装配必须提前构造 client。
|
||||
func NewAgentHandlerWithRPC(svc *agentsv.AgentService, rpcClient *gatewayagent.Client) *AgentHandler {
|
||||
func NewAgentHandlerWithRPC(svc *agentsv.AgentService, rpcClient *agentclient.Client) *AgentHandler {
|
||||
return &AgentHandler{
|
||||
svc: svc,
|
||||
rpcClient: rpcClient,
|
||||
@@ -302,7 +302,7 @@ func writeAgentSSEError(w io.Writer, err error) {
|
||||
_ = writeSSEData(w, "[DONE]")
|
||||
}
|
||||
|
||||
func (api *AgentHandler) getAgentRPCClient() (*gatewayagent.Client, error) {
|
||||
func (api *AgentHandler) getAgentRPCClient() (*agentclient.Client, error) {
|
||||
if api == nil {
|
||||
return nil, errors.New("agent handler is not initialized")
|
||||
}
|
||||
@@ -314,7 +314,7 @@ func (api *AgentHandler) getAgentRPCClient() (*gatewayagent.Client, error) {
|
||||
return api.rpcClient, nil
|
||||
}
|
||||
|
||||
client, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
|
||||
client, err := agentclient.NewClient(agentclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
|
||||
Target: viper.GetString("agent.rpc.target"),
|
||||
Timeout: viper.GetDuration("agent.rpc.timeout"),
|
||||
|
||||
@@ -9,7 +9,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
coursecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/course"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
memorycontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/memory"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/notification"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
taskcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/task"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -2,8 +2,8 @@ package userauthapi
|
||||
|
||||
import (
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
// 1. 只注册 /user 下的边缘路由,不关心其它业务域路由;
|
||||
// 2. 登录、注册、刷新 token 只做请求转发;登出需要先经过 access token 边缘鉴权;
|
||||
// 3. 限流仍复用当前通用中间件,后续若 gateway 独立成包,可再整体下沉。
|
||||
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *UserHandler, authClient ports.AccessTokenValidator, limiter *pkg.RateLimiter) {
|
||||
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *UserHandler, authClient ports.AccessTokenValidator, limiter *ratelimit.RateLimiter) {
|
||||
if apiGroup == nil || handler == nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -4,12 +4,12 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RateLimitMiddleware(limiter *pkg.RateLimiter, capacity, rate int) gin.HandlerFunc {
|
||||
func RateLimitMiddleware(limiter *ratelimit.RateLimiter, capacity, rate int) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// 1. 确定限流对象:可以用 UserID,也可以用 IP
|
||||
// 这里建议用 UserID,防止某个用户换 IP 疯狂刷
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||
userauthapi "github.com/LoveLosita/smartflow/backend/gateway/api/userauth"
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/spf13/viper"
|
||||
@@ -55,7 +55,7 @@ func StartEngine(ctx context.Context, r *gin.Engine) {
|
||||
}
|
||||
}
|
||||
|
||||
func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient, cache *dao.CacheDAO, limiter *pkg.RateLimiter) *gin.Engine {
|
||||
func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient, cache *dao.CacheDAO, limiter *ratelimit.RateLimiter) *gin.Engine {
|
||||
r := gin.Default()
|
||||
apiGroup := r.Group("/api/v1")
|
||||
{
|
||||
|
||||
78
backend/gateway/shared/respond/respond.go
Normal file
78
backend/gateway/shared/respond/respond.go
Normal file
@@ -0,0 +1,78 @@
|
||||
// Package respond 承载 gateway HTTP 门面使用的响应适配入口。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只面向 gateway/api 与 gateway/middleware,统一 HTTP JSON 写回与错误响应常量的 import 位置;
|
||||
// 2. 迁移期继续复用根 backend/respond 的响应码和错误语义,避免一次性改动服务层、RPC 层和 client 层;
|
||||
// 3. 不承载任何服务私有业务逻辑,服务代码禁止反向 import backend/gateway/shared/respond。
|
||||
package respond
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
rootrespond "github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type (
|
||||
// Response 是 gateway 透传给前端的项目响应码结构。
|
||||
Response = rootrespond.Response
|
||||
|
||||
// FinalResponse 是带 data 字段的统一 HTTP 响应结构。
|
||||
FinalResponse = rootrespond.FinalResponse
|
||||
)
|
||||
|
||||
var (
|
||||
Ok = rootrespond.Ok
|
||||
UserTasksEmpty = rootrespond.UserTasksEmpty
|
||||
NoOngoingOrUpcomingSchedule = rootrespond.NoOngoingOrUpcomingSchedule
|
||||
TaskAlreadyDeleted = rootrespond.TaskAlreadyDeleted
|
||||
WrongParamType = rootrespond.WrongParamType
|
||||
MissingParam = rootrespond.MissingParam
|
||||
MissingIdempotencyKey = rootrespond.MissingIdempotencyKey
|
||||
MissingToken = rootrespond.MissingToken
|
||||
InvalidClaims = rootrespond.InvalidClaims
|
||||
ErrUnauthorized = rootrespond.ErrUnauthorized
|
||||
RequestIsProcessing = rootrespond.RequestIsProcessing
|
||||
ScheduleConflict = rootrespond.ScheduleConflict
|
||||
TooManyRequests = rootrespond.TooManyRequests
|
||||
TokenUsageExceedsLimit = rootrespond.TokenUsageExceedsLimit
|
||||
ConversationNotFound = rootrespond.ConversationNotFound
|
||||
MissingConversationID = rootrespond.MissingConversationID
|
||||
)
|
||||
|
||||
// RespWithData 为 gateway HTTP 门面生成带 data 的统一响应体。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做响应结构组装,不决定 HTTP 状态码;
|
||||
// 2. 响应码来源仍是根 respond,保证迁移前后前端协议不变。
|
||||
func RespWithData(response Response, data interface{}) FinalResponse {
|
||||
return rootrespond.RespWithData(response, data)
|
||||
}
|
||||
|
||||
// DealWithError 将项目 error 映射为 HTTP JSON 响应。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只在 gateway HTTP 层写响应;
|
||||
// 2. 业务错误语义仍由根 respond 统一维护;
|
||||
// 3. nil error 直接忽略,保持旧 DealWithError 的降级语义。
|
||||
func DealWithError(c *gin.Context, err error) {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
var resp Response
|
||||
if errors.Is(err, UserTasksEmpty) || errors.Is(err, NoOngoingOrUpcomingSchedule) || errors.Is(err, TaskAlreadyDeleted) {
|
||||
c.JSON(http.StatusOK, err)
|
||||
return
|
||||
}
|
||||
if errors.As(err, &resp) {
|
||||
c.JSON(resp.HTTPStatus(), resp)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusInternalServerError, InternalError(err))
|
||||
}
|
||||
|
||||
// InternalError 生成 500 类响应体,供 gateway 依赖缺失等边缘错误使用。
|
||||
func InternalError(err error) Response {
|
||||
return rootrespond.InternalError(err)
|
||||
}
|
||||
@@ -1,640 +0,0 @@
|
||||
# HANDOFF:RAG Infra 一步到位接入方案
|
||||
|
||||
## 1. 文档目的
|
||||
|
||||
本文用于把 `backend/infra/rag` 从“可运行骨架”推进到“可被业务正式接入的共享基础设施”。
|
||||
|
||||
本文重点回答 4 个问题:
|
||||
|
||||
1. 当前 `RAG Infra` 已经做到了什么,还缺什么。
|
||||
2. 什么样的状态,才算“合格、可接入、可灰度、可回滚”的 `RAG Infra`。
|
||||
3. 如何以“依赖注入 + 对外只暴露方法入口”的方式收口,避免业务侧直接依赖底层实现细节。
|
||||
4. 如何在不打断现有业务的前提下,把 `memory` 与 `websearch` 并行迁移到统一 `RAG Infra`。
|
||||
|
||||
---
|
||||
|
||||
## 2. 当前现状
|
||||
|
||||
## 2.1 已完成部分
|
||||
|
||||
当前 `backend/infra/rag` 已经具备共享骨架,主要包括:
|
||||
|
||||
1. 通用接口与类型:
|
||||
- `core/interfaces.go`
|
||||
- `core/types.go`
|
||||
- `core/errors.go`
|
||||
2. 通用编排器:
|
||||
- `core/pipeline.go`
|
||||
3. 默认切块器:
|
||||
- `chunk/text_chunker.go`
|
||||
4. 语料适配器:
|
||||
- `corpus/memory_corpus.go`
|
||||
- `corpus/web_corpus.go`
|
||||
5. 默认可运行实现:
|
||||
- `embed/mock_embedder.go`
|
||||
- `rerank/noop_reranker.go`
|
||||
- `store/inmemory_store.go`
|
||||
6. 配置骨架:
|
||||
- `config/config.go`
|
||||
|
||||
这说明项目已经完成了“共享 RAG Core 的第一阶段搭骨架”,不再是单纯的设计想法。
|
||||
|
||||
## 2.2 当前存在的问题
|
||||
|
||||
虽然骨架已经有了,但距离“可正式接入的 Infra”还差关键几步:
|
||||
|
||||
1. 运行时没有正式装配入口。
|
||||
- 当前仍主要依赖 `rag.NewDefaultPipeline()`。
|
||||
- 启动阶段没有统一按配置组装 `embedder / store / reranker / corpus runtime`。
|
||||
2. 真实底层实现还是占位。
|
||||
- `embed/eino_embedder.go` 未实现。
|
||||
- `rerank/eino_reranker.go` 未实现。
|
||||
- `store/milvus_store.go` 未实现。
|
||||
3. 配置虽有结构,但还未真正接入运行链路。
|
||||
- `rag/config/config.go` 定义了 `rag.*` 配置。
|
||||
- `backend/cmd/start.go` 尚未实例化并注入 `RAG Runtime`。
|
||||
4. 业务尚未真正切流。
|
||||
- `memory` 读取链路还没有正式走 `Pipeline.Retrieve`。
|
||||
- `websearch` 还没有通过 `WebCorpus + Pipeline` 形成正式 WebRAG 路径。
|
||||
5. 工程化能力不完整。
|
||||
- 缺统一 timeout。
|
||||
- 缺统一日志字段。
|
||||
- 缺基础指标。
|
||||
- 缺单元测试与集成测试。
|
||||
6. 还存在潜在重复实现风险。
|
||||
- `retrieve/vector_retriever.go` 与 `core/pipeline.go` 都承载部分检索逻辑。
|
||||
- 若后续两套逻辑并存,容易出现行为漂移与维护成本上升。
|
||||
|
||||
## 2.3 当前状态结论
|
||||
|
||||
当前 `RAG Infra` 的状态,更准确地说是:
|
||||
|
||||
1. 已经完成“共享骨架搭建”。
|
||||
2. 还没有完成“统一装配、真实实现、正式接入、工程化收口”。
|
||||
3. 目前适合继续扩展,但还不适合直接作为长期稳定的业务依赖面。
|
||||
|
||||
---
|
||||
|
||||
## 3. 目标定义:什么叫“合格的 RAG Infra”
|
||||
|
||||
本轮改造完成后,`backend/infra/rag` 应满足以下标准:
|
||||
|
||||
1. 启动时可统一构造并注入,不再靠业务模块自行拼装底层依赖。
|
||||
2. 对外只暴露稳定方法入口,不暴露底层 `Pipeline / Store / Embedder / Reranker` 的装配细节。
|
||||
3. 支持按配置切换实现:
|
||||
- `inmemory / milvus`
|
||||
- `mock / eino`
|
||||
- `noop / eino`
|
||||
4. 支持 `memory` 与 `websearch` 两类语料复用同一套 `chunk / embed / retrieve / rerank / fallback` 流程。
|
||||
5. 支持灰度开关与回滚,不要求业务“一次性硬切流”。
|
||||
6. 支持基础观测:
|
||||
- 延迟
|
||||
- 命中数
|
||||
- fallback 原因
|
||||
- 错误码
|
||||
7. 具备最小可依赖测试集,保证公共层改动不会悄悄破坏业务。
|
||||
|
||||
---
|
||||
|
||||
## 4. 核心改造原则
|
||||
|
||||
## 4.1 原则一:依赖注入统一由 Infra 自己负责
|
||||
|
||||
`RAG Infra` 必须自己承接“底层实现装配”,业务侧不应感知:
|
||||
|
||||
1. 当前用的是 `Milvus` 还是 `InMemoryStore`。
|
||||
2. 当前用的是 `MockEmbedder` 还是 `EinoEmbedder`。
|
||||
3. 当前是否开启 `Reranker`。
|
||||
4. 当前超时、阈值、切块参数是多少。
|
||||
|
||||
业务只拿到一个已经注入好的 `RAG Runtime` 或 `RAG Service`,直接调用方法。
|
||||
|
||||
## 4.2 原则二:对外只暴露方法,不暴露底层零件
|
||||
|
||||
业务层不应直接依赖这些细粒度对象:
|
||||
|
||||
1. `core.Pipeline`
|
||||
2. `core.VectorStore`
|
||||
3. `core.Embedder`
|
||||
4. `core.Reranker`
|
||||
5. `corpus.MemoryCorpus`
|
||||
6. `corpus.WebCorpus`
|
||||
|
||||
这些对象应被视为 `infra/rag` 内部拼装细节。
|
||||
|
||||
业务层只应调用诸如以下方法:
|
||||
|
||||
1. `IngestMemory`
|
||||
2. `RetrieveMemory`
|
||||
3. `IngestWeb`
|
||||
4. `RetrieveWeb`
|
||||
|
||||
这样做的好处是:
|
||||
|
||||
1. 业务依赖面更稳定。
|
||||
2. 后续替换底层实现时,不会把改动扩散到多个业务模块。
|
||||
3. 便于统一日志、监控、降级和权限边界。
|
||||
|
||||
## 4.3 原则三:业务语义留在业务层,通用 RAG 工序下沉到 Infra
|
||||
|
||||
下沉到 `infra/rag` 的内容:
|
||||
|
||||
1. 切块
|
||||
2. 向量化
|
||||
3. 向量存储
|
||||
4. 召回
|
||||
5. rerank
|
||||
6. threshold 过滤
|
||||
7. fallback 语义
|
||||
8. 统一日志与指标
|
||||
|
||||
留在业务层的内容:
|
||||
|
||||
1. `memory` 的注入优先级、门控规则、显式/隐式策略
|
||||
2. `websearch` 的 provider 搜索、query 改写、时间过滤、domain 白名单、抓取策略
|
||||
3. 最终给模型注入哪些证据、注入多少、如何组织引用
|
||||
|
||||
## 4.4 原则四:并行迁移,不一步删旧
|
||||
|
||||
本轮改造虽然目标是“一步到位把 Infra 做完整”,但切流必须保持并行迁移:
|
||||
|
||||
1. 新 Infra 建好后,先让 `memory` 接入并保留旧逻辑兜底。
|
||||
2. 再让 `websearch` 接入并保留 V1 路径兜底。
|
||||
3. 观察稳定后再删除旧分支。
|
||||
|
||||
---
|
||||
|
||||
## 5. 目标架构
|
||||
|
||||
## 5.1 推荐对外结构
|
||||
|
||||
建议在 `backend/infra/rag` 新增统一对外门面,例如:
|
||||
|
||||
1. `runtime.go`
|
||||
2. `factory.go`
|
||||
3. `service.go`
|
||||
|
||||
推荐把正式对外依赖面收敛为一个接口,例如:
|
||||
|
||||
```go
|
||||
type Runtime interface {
|
||||
IngestMemory(ctx context.Context, input MemoryIngestRequest) (*IngestResult, error)
|
||||
RetrieveMemory(ctx context.Context, input MemoryRetrieveRequest) (*RetrieveResult, error)
|
||||
|
||||
IngestWeb(ctx context.Context, input WebIngestRequest) (*IngestResult, error)
|
||||
RetrieveWeb(ctx context.Context, input WebRetrieveRequest) (*RetrieveResult, error)
|
||||
}
|
||||
```
|
||||
|
||||
说明:
|
||||
|
||||
1. 业务侧只依赖 `Runtime`。
|
||||
2. `Runtime` 内部再去调用 `Pipeline + CorpusAdapter + Store + Embedder + Reranker`。
|
||||
3. 这样可以保证业务不会直接 import `core` 包下的底层细节。
|
||||
|
||||
## 5.2 推荐内部结构
|
||||
|
||||
建议内部形成以下分工:
|
||||
|
||||
1. `factory.go`
|
||||
- 负责按配置创建 `Embedder / Store / Reranker / Pipeline`
|
||||
2. `runtime.go`
|
||||
- 负责持有 `Pipeline + MemoryCorpus + WebCorpus + Logger + Metrics`
|
||||
3. `service.go`
|
||||
- 负责定义 `Runtime` 接口与对外方法
|
||||
4. `core/`
|
||||
- 保持底层通用编排逻辑
|
||||
5. `corpus/`
|
||||
- 只负责“语料 -> 标准文档”和“业务过滤 -> 标准 filter”
|
||||
|
||||
## 5.3 推荐依赖注入方式
|
||||
|
||||
在 `backend/cmd/start.go` 中,启动期统一创建 `RAG Runtime`,例如:
|
||||
|
||||
1. 读取 `rag.*` 配置
|
||||
2. 构造 `RAGFactory`
|
||||
3. 生成 `RAGRuntime`
|
||||
4. 注入给:
|
||||
- `memory service`
|
||||
- `newAgent web tools`
|
||||
|
||||
业务侧只拿运行好的对象,不再自己 new 任何底层实现。
|
||||
|
||||
---
|
||||
|
||||
## 6. 对外方法面设计
|
||||
|
||||
## 6.1 Memory 对外方法
|
||||
|
||||
推荐对外暴露以下方法:
|
||||
|
||||
1. `IngestMemory`
|
||||
- 输入:标准化后的记忆入库请求
|
||||
- 输出:文档数、chunk 数、同步结果
|
||||
2. `RetrieveMemory`
|
||||
- 输入:用户、会话、助手、run、query、topK、threshold
|
||||
- 输出:标准 `RetrieveResult`
|
||||
|
||||
注意:
|
||||
|
||||
1. `memory` 业务层不应直接调用 `MemoryCorpus`。
|
||||
2. `memory` 业务层不应自己拼向量过滤条件。
|
||||
3. 所有过滤条件由 `RetrieveMemory` 内部统一转换。
|
||||
|
||||
## 6.2 Web 对外方法
|
||||
|
||||
推荐对外暴露以下方法:
|
||||
|
||||
1. `IngestWeb`
|
||||
- 输入:抓取结果 `url/title/snippet/content/domain/query_id/session_id`
|
||||
- 输出:统一入库摘要
|
||||
2. `RetrieveWeb`
|
||||
- 输入:query、query_id/session_id、domain、topK、threshold
|
||||
- 输出:标准 `RetrieveResult`
|
||||
|
||||
注意:
|
||||
|
||||
1. `websearch` 业务层不应直接持有 `WebCorpus`。
|
||||
2. `websearch` 业务层只负责“拿到页面内容”与“决定是否需要调用 RAG”。
|
||||
3. 实际向量入库、检索、rerank 由 `infra/rag` 统一处理。
|
||||
|
||||
## 6.3 对外方法设计边界
|
||||
|
||||
方法层负责什么:
|
||||
|
||||
1. 参数合法性校验
|
||||
2. 内部 filter 组装
|
||||
3. 调 `Pipeline.Ingest / Retrieve`
|
||||
4. 统一日志、指标、fallback
|
||||
|
||||
方法层不负责什么:
|
||||
|
||||
1. 不负责 `websearch provider` 搜索
|
||||
2. 不负责 HTML 抓取
|
||||
3. 不负责 prompt 注入
|
||||
4. 不负责业务排序偏好
|
||||
|
||||
---
|
||||
|
||||
## 7. 具体改造计划
|
||||
|
||||
## 7.1 第一部分:把 RAG Infra 自身做完整
|
||||
|
||||
### 目标
|
||||
|
||||
让 `backend/infra/rag` 成为“正式可注入、正式可切换、正式可依赖”的共享基础设施。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. 新增正式运行时与工厂:
|
||||
- `backend/infra/rag/runtime.go`
|
||||
- `backend/infra/rag/factory.go`
|
||||
- 如有需要,新增 `backend/infra/rag/service.go`
|
||||
2. 扩展配置:
|
||||
- `rag.enabled`
|
||||
- `rag.store`
|
||||
- `rag.embed.provider`
|
||||
- `rag.embed.model`
|
||||
- `rag.embed.timeoutMs`
|
||||
- `rag.embed.dimension`
|
||||
- `rag.reranker.provider`
|
||||
- `rag.reranker.timeoutMs`
|
||||
- `rag.retrieve.timeoutMs`
|
||||
- `rag.ingest.chunkSize`
|
||||
- `rag.ingest.chunkOverlap`
|
||||
3. 收口运行入口:
|
||||
- `rag.NewDefaultPipeline()` 保留为本地 fallback
|
||||
- 正式业务接入走 `NewRuntimeFromConfig(...)`
|
||||
4. 消除重复检索路径:
|
||||
- 明确 `Pipeline` 是官方检索入口
|
||||
- `retrieve/vector_retriever.go` 要么内聚为内部实现,要么后续删除,避免双轨
|
||||
|
||||
### 验收
|
||||
|
||||
1. 启动期可按配置成功构造 `RAG Runtime`。
|
||||
2. 业务侧不需要自己组装 `Pipeline / Store / Embedder / Reranker`。
|
||||
3. 对外暴露面稳定,底层实现可替换。
|
||||
|
||||
## 7.2 第二部分:补齐真实底层实现
|
||||
|
||||
### 目标
|
||||
|
||||
让 `RAG Infra` 具备真实可用的向量能力,而不是停留在 mock。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. 实现 `embed/eino_embedder.go`
|
||||
- 负责 embedding 调用
|
||||
- 负责 embedding timeout
|
||||
- 负责错误包装与统一日志
|
||||
2. 实现 `rerank/eino_reranker.go`
|
||||
- 负责 rerank 调用
|
||||
- 负责 rerank timeout
|
||||
- 负责失败降级到原排序
|
||||
3. 实现 `store/milvus_store.go`
|
||||
- `Upsert`
|
||||
- `Search`
|
||||
- `Delete`
|
||||
- `Get`
|
||||
4. Milvus 元数据设计建议:
|
||||
- 高频过滤字段应做显式标量字段,不建议全部依赖大 JSON 过滤
|
||||
- 重点字段包括:
|
||||
- `corpus`
|
||||
- `user_id`
|
||||
- `assistant_id`
|
||||
- `conversation_id`
|
||||
- `run_id`
|
||||
- `memory_type`
|
||||
- `query_id`
|
||||
- `session_id`
|
||||
- `domain`
|
||||
|
||||
### 验收
|
||||
|
||||
1. `MilvusStore` 在已准备好的 Docker 环境中可稳定完成写入与检索。
|
||||
2. `EinoEmbedder` 和 `EinoReranker` 可按配置启用。
|
||||
3. provider 波动时,主链路仍能 fallback。
|
||||
|
||||
## 7.3 第三部分:补齐工程化能力
|
||||
|
||||
### 目标
|
||||
|
||||
让 `RAG Infra` 具备“可观测、可测试、可回滚”的基础设施属性。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. timeout 接线:
|
||||
- embedding timeout
|
||||
- retrieve timeout
|
||||
- rerank timeout
|
||||
2. 统一日志字段:
|
||||
- `trace_id`
|
||||
- `corpus`
|
||||
- `action`
|
||||
- `provider`
|
||||
- `latency_ms`
|
||||
- `hit_count`
|
||||
- `fallback_reason`
|
||||
3. 指标补齐:
|
||||
- `rag_ingest_count`
|
||||
- `rag_retrieve_count`
|
||||
- `rag_hit_count`
|
||||
- `rag_fallback_rate`
|
||||
- `rag_latency_ms`
|
||||
4. 测试补齐:
|
||||
- `chunker` 单测
|
||||
- `corpus filter` 单测
|
||||
- `pipeline fallback` 单测
|
||||
- `MilvusStore` 集成测试
|
||||
- `memory/web` 过滤隔离测试
|
||||
|
||||
### 验收
|
||||
|
||||
1. 出现检索问题时,可从日志定位是:
|
||||
- 没命中
|
||||
- 超时
|
||||
- rerank 降级
|
||||
- filter 过滤过严
|
||||
2. 公共层测试可稳定覆盖关键路径。
|
||||
|
||||
## 7.4 第四部分:接入 Memory
|
||||
|
||||
### 目标
|
||||
|
||||
让 `memory` 成为第一个正式接入 `RAG Infra` 的业务域。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. 写入链路接入:
|
||||
- 在 memory worker 成功写入 `memory_items` 后,调用 `RAGRuntime.IngestMemory`
|
||||
- 复用 `memory_items.vector_status/vector_id`
|
||||
2. 读取链路接入:
|
||||
- 在 `memory/service/read_service.go` 中新增 `RetrieveMemory` 路径
|
||||
- 强制过滤:
|
||||
- `user_id`
|
||||
- `assistant_id`
|
||||
- `conversation_id`
|
||||
- `run_id`
|
||||
3. 开关控制:
|
||||
- `memory.rag.enabled=false` 默认关闭
|
||||
- 打开后先灰度使用新路径
|
||||
4. 降级策略:
|
||||
- `RAG` 检索失败 -> 回退旧读取链路
|
||||
- `Reranker` 失败 -> 保留原始排序
|
||||
|
||||
### 验收
|
||||
|
||||
1. 开关关闭时行为与当前一致。
|
||||
2. 开关开启时,记忆召回可稳定工作。
|
||||
3. 失败时不会影响主链路回复。
|
||||
|
||||
## 7.5 第五部分:接入 WebSearch
|
||||
|
||||
### 目标
|
||||
|
||||
让 `websearch` 成为第二个正式接入 `RAG Infra` 的业务域,并复用 `WebCorpus`。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. 保留 V1 路径:
|
||||
- `web_search` 做 provider 搜索
|
||||
- `web_fetch` 做正文抓取与清洗
|
||||
2. 新增 V2 路径:
|
||||
- 把抓取结果映射为 `WebIngestItem`
|
||||
- 调 `RAGRuntime.IngestWeb`
|
||||
- 再调 `RAGRuntime.RetrieveWeb`
|
||||
3. 强约束过滤:
|
||||
- `query_id` 或 `session_id` 至少有一个
|
||||
- 避免跨 query/session 串召回
|
||||
4. 开关控制:
|
||||
- `websearch.rag.enabled=false` 默认关闭
|
||||
5. 降级策略:
|
||||
- `web_rag_search` 失败 -> 回退到 `web_search + web_fetch`
|
||||
|
||||
### 验收
|
||||
|
||||
1. 新旧链路并存,互不影响。
|
||||
2. 新链路不会跨 query/session 串数据。
|
||||
3. 失败可立刻回退到 V1。
|
||||
|
||||
## 7.6 第六部分:启动接线与统一管理
|
||||
|
||||
### 目标
|
||||
|
||||
让 `RAG Runtime` 成为启动期统一装配、统一管理的依赖。
|
||||
|
||||
### 实施项
|
||||
|
||||
1. 在 `backend/cmd/start.go` 中:
|
||||
- 读取 `rag.*` 配置
|
||||
- 构造 `RAG Runtime`
|
||||
- 注入给 `memory` 与 `newAgent web tools`
|
||||
2. 统一由启动期管理依赖生命周期:
|
||||
- 初始化
|
||||
- 健康检查
|
||||
- 关闭清理
|
||||
3. 业务层禁止直接 new 底层实现:
|
||||
- 禁止业务自己构建 `MilvusStore`
|
||||
- 禁止业务自己构建 `EinoEmbedder`
|
||||
- 禁止业务自己拼 `Pipeline`
|
||||
|
||||
### 验收
|
||||
|
||||
1. 依赖管理集中在启动层。
|
||||
2. 业务代码只依赖方法入口,不接触底层实现。
|
||||
3. 后续替换实现时,无需大面积修改业务层代码。
|
||||
|
||||
---
|
||||
|
||||
## 8. 推荐目录改造方案
|
||||
|
||||
建议新增或调整如下文件:
|
||||
|
||||
1. `backend/infra/rag/runtime.go`
|
||||
2. `backend/infra/rag/factory.go`
|
||||
3. `backend/infra/rag/service.go`
|
||||
4. `backend/infra/rag/README.md` 或在本文件持续追加
|
||||
5. `backend/infra/rag/embed/eino_embedder.go`
|
||||
6. `backend/infra/rag/rerank/eino_reranker.go`
|
||||
7. `backend/infra/rag/store/milvus_store.go`
|
||||
8. `backend/infra/rag/core/pipeline_test.go`
|
||||
9. `backend/infra/rag/chunk/text_chunker_test.go`
|
||||
10. `backend/infra/rag/corpus/memory_corpus_test.go`
|
||||
11. `backend/infra/rag/corpus/web_corpus_test.go`
|
||||
12. `backend/infra/rag/store/milvus_store_integration_test.go`
|
||||
|
||||
配套改动文件:
|
||||
|
||||
1. `backend/cmd/start.go`
|
||||
2. `backend/config.example.yaml`
|
||||
3. `backend/memory/service/read_service.go`
|
||||
4. `backend/newAgent/tools/registry.go`
|
||||
5. `backend/agent/通用能力接入文档.md`
|
||||
|
||||
---
|
||||
|
||||
## 9. 配置建议
|
||||
|
||||
建议新增如下配置结构:
|
||||
|
||||
```yaml
|
||||
rag:
|
||||
enabled: true
|
||||
store: "milvus"
|
||||
topK: 8
|
||||
threshold: 0.55
|
||||
retrieve:
|
||||
timeoutMs: 1500
|
||||
ingest:
|
||||
chunkSize: 400
|
||||
chunkOverlap: 80
|
||||
embed:
|
||||
provider: "eino"
|
||||
model: ""
|
||||
timeoutMs: 1200
|
||||
dimension: 1024
|
||||
reranker:
|
||||
enabled: true
|
||||
provider: "eino"
|
||||
timeoutMs: 1200
|
||||
|
||||
memory:
|
||||
rag:
|
||||
enabled: false
|
||||
|
||||
websearch:
|
||||
rag:
|
||||
enabled: false
|
||||
```
|
||||
|
||||
说明:
|
||||
|
||||
1. `rag.enabled` 控制公共层是否启用。
|
||||
2. `memory.rag.enabled` 与 `websearch.rag.enabled` 控制业务级切流。
|
||||
3. 即使 `rag.enabled=true`,也不代表所有业务立刻默认走新链路。
|
||||
|
||||
---
|
||||
|
||||
## 10. 回滚策略
|
||||
|
||||
推荐回滚顺序如下:
|
||||
|
||||
1. 先关业务级开关:
|
||||
- `memory.rag.enabled=false`
|
||||
- `websearch.rag.enabled=false`
|
||||
2. 再关重排:
|
||||
- `rag.reranker.enabled=false`
|
||||
3. 再切底层实现:
|
||||
- `rag.store=inmemory`
|
||||
- `rag.embed.provider=mock`
|
||||
- `rag.reranker.provider=noop`
|
||||
4. 若仍异常,再回退到业务旧链路
|
||||
|
||||
这样可以做到:
|
||||
|
||||
1. 不因单个 provider 波动打断主流程。
|
||||
2. 保留最小可用能力。
|
||||
3. 故障定位粒度更细。
|
||||
|
||||
---
|
||||
|
||||
## 11. 风险与应对
|
||||
|
||||
1. 风险:Milvus 过滤能力与现有 metadata 结构不匹配。
|
||||
- 应对:高频过滤字段单独建模,不依赖大 JSON 粗暴过滤。
|
||||
2. 风险:embedding/rerank provider 波动影响延迟。
|
||||
- 应对:超时控制 + fallback + 业务级开关。
|
||||
3. 风险:业务层绕过 Infra 直接依赖底层实现。
|
||||
- 应对:通过 `Runtime` 方法面统一收口,代码评审禁止横向绕过。
|
||||
4. 风险:新旧检索路径长期并存导致维护成本上升。
|
||||
- 应对:本轮先保留兜底,稳定后明确删除旧实现。
|
||||
5. 风险:跨 query/session 串召回。
|
||||
- 应对:`WebRetrieve` 强制校验 `query_id/session_id` 至少其一存在。
|
||||
|
||||
---
|
||||
|
||||
## 12. 最小落地顺序
|
||||
|
||||
如果按“尽快落成可接入 Infra”的优先级来排,本轮建议顺序如下:
|
||||
|
||||
1. 先做 `runtime/factory/service`,把依赖注入和方法面收口。
|
||||
2. 再实现 `MilvusStore + EinoEmbedder + EinoReranker`。
|
||||
3. 再补 timeout、日志、指标、测试。
|
||||
4. 然后优先接 `memory`。
|
||||
5. 最后接 `websearch`。
|
||||
|
||||
原因:
|
||||
|
||||
1. 若先接业务、不先收口方法面,后面会把底层细节泄露到业务层。
|
||||
2. 若先接 websearch、不先接 memory,会导致共享 Infra 价值不够集中,面试叙事也不完整。
|
||||
|
||||
---
|
||||
|
||||
## 13. 本轮完成后的预期收益
|
||||
|
||||
完成本方案后,项目会获得以下收益:
|
||||
|
||||
1. `memory` 与 `websearch` 共享一套真正可运行的 RAG 基础设施。
|
||||
2. 业务侧不再重复实现切块、召回、重排与降级逻辑。
|
||||
3. `infra/rag` 成为正式公共能力,具备统一依赖注入与统一管理能力。
|
||||
4. 后续新增新语料域时,只需新增 `CorpusAdapter + 方法面`,无需再复制一套 RAG 链路。
|
||||
5. 项目简历叙事会更完整:
|
||||
- “抽象并实现共享 RAG Infra”
|
||||
- “统一 Memory/WebSearch 的检索与重排能力”
|
||||
- “通过依赖注入与门面方法收口底层复杂度”
|
||||
|
||||
---
|
||||
|
||||
## 14. 当前建议结论
|
||||
|
||||
建议把本轮目标明确为:
|
||||
|
||||
1. **不是**“再给 RAG 补几个占位实现”。
|
||||
2. **而是**“把 `backend/infra/rag` 一次性做成正式可接入的公共基础设施”。
|
||||
|
||||
关键落点是两句话:
|
||||
|
||||
1. 依赖注入统一由 `infra/rag` 自己负责。
|
||||
2. 对外只暴露方法入口,业务侧不直接接触底层实现细节。
|
||||
|
||||
只要这两点收住,后续 `memory`、`websearch`、甚至更多语料域都会明显更好管理。
|
||||
@@ -1,191 +0,0 @@
|
||||
# RAG 复用接口实施计划(Memory + WebSearch 统一底座)
|
||||
|
||||
## 1. 目标与原则
|
||||
|
||||
1. 在 `backend/infra/rag` 抽离共享 RAG Core,统一 `chunk/embed/retrieve/rerank` 能力。
|
||||
2. 先接入 `MemoryCorpus` 与 `WebCorpus` 两个适配器,避免后续重复造轮子。
|
||||
3. 保持“并行迁移”策略:新老链路并存,先接入、再灰度、再切流、最后删除旧实现。
|
||||
4. 不阻塞现有主链路;任何 RAG 子能力失败都必须可降级。
|
||||
|
||||
## 2. 本轮范围与非目标
|
||||
|
||||
### 2.1 本轮范围
|
||||
|
||||
1. 定义 RAG Core 接口、标准数据结构、错误码和回退语义。
|
||||
2. 提供 `MemoryCorpus` 与 `WebCorpus` 适配层设计。
|
||||
3. 给出分阶段落地步骤、验收标准、风险控制。
|
||||
|
||||
### 2.2 本轮非目标
|
||||
|
||||
1. 不在本轮实现完整生产级向量检索细节(Milvus 连接器可先占位)。
|
||||
2. 不在本轮统一改造所有调用方,只做首批接入点。
|
||||
3. 不在本轮引入多 Provider 工厂(先保证单 Provider 可替换)。
|
||||
|
||||
## 3. 目录与模块规划
|
||||
|
||||
建议目录(先建骨架,逐轮填实):
|
||||
|
||||
```text
|
||||
backend/infra/rag/
|
||||
core/
|
||||
types.go
|
||||
interfaces.go
|
||||
pipeline.go
|
||||
errors.go
|
||||
chunk/
|
||||
text_chunker.go
|
||||
embed/
|
||||
eino_embedder.go
|
||||
retrieve/
|
||||
vector_retriever.go
|
||||
rerank/
|
||||
eino_reranker.go
|
||||
store/
|
||||
vector_store.go
|
||||
milvus_store.go
|
||||
corpus/
|
||||
memory_corpus.go
|
||||
web_corpus.go
|
||||
config/
|
||||
config.go
|
||||
```
|
||||
|
||||
## 4. 核心接口设计(建议签名)
|
||||
|
||||
```go
|
||||
type Chunker interface {
|
||||
Chunk(ctx context.Context, doc SourceDocument, opt ChunkOption) ([]Chunk, error)
|
||||
}
|
||||
|
||||
type Embedder interface {
|
||||
Embed(ctx context.Context, texts []string, action string) ([][]float32, error)
|
||||
}
|
||||
|
||||
type Retriever interface {
|
||||
Retrieve(ctx context.Context, req RetrieveRequest) ([]ScoredChunk, error)
|
||||
}
|
||||
|
||||
type Reranker interface {
|
||||
Rerank(ctx context.Context, query string, candidates []ScoredChunk, topK int) ([]ScoredChunk, error)
|
||||
}
|
||||
|
||||
type VectorStore interface {
|
||||
Upsert(ctx context.Context, rows []VectorRow) error
|
||||
Search(ctx context.Context, req VectorSearchRequest) ([]ScoredVectorRow, error)
|
||||
Delete(ctx context.Context, ids []string) error
|
||||
Get(ctx context.Context, ids []string) ([]VectorRow, error)
|
||||
}
|
||||
|
||||
type CorpusAdapter interface {
|
||||
Name() string
|
||||
BuildIngestDocuments(ctx context.Context, input any) ([]SourceDocument, error)
|
||||
BuildRetrieveFilter(ctx context.Context, req any) (map[string]any, error)
|
||||
}
|
||||
```
|
||||
|
||||
## 5. 统一流程约定
|
||||
|
||||
### 5.1 Ingest 流程
|
||||
|
||||
1. `CorpusAdapter.BuildIngestDocuments` 生成标准文档。
|
||||
2. `Chunker.Chunk` 切块(固定 chunk_size + overlap)。
|
||||
3. `Embedder.Embed(action=add/update)` 生成向量。
|
||||
4. `VectorStore.Upsert` 写入。
|
||||
5. 任一步失败按“可补偿”记录状态,不影响主业务成功返回。
|
||||
|
||||
### 5.2 Retrieve 流程
|
||||
|
||||
1. `CorpusAdapter.BuildRetrieveFilter` 构建过滤条件。
|
||||
2. `Embedder.Embed(action=search)` 向量化 query。
|
||||
3. `VectorStore.Search` 召回候选。
|
||||
4. `threshold` 过滤。
|
||||
5. 可选 `Reranker` 重排;失败则 fallback 到原排序并记录原因码。
|
||||
|
||||
## 6. 两类 Corpus 适配器设计
|
||||
|
||||
### 6.1 MemoryCorpus
|
||||
|
||||
1. 数据源:`memory_items`(结构化记忆事实)。
|
||||
2. 强约束过滤:`user_id + assistant_id + conversation_id`。
|
||||
3. 元数据:`memory_type/confidence/sensitivity_level/ttl_at/source_event_id`。
|
||||
4. 注入优先级:`constraint/preference` 高于 `fact/todo_hint`。
|
||||
|
||||
### 6.2 WebCorpus
|
||||
|
||||
1. 数据源:websearch 抓取结果(`url/title/snippet/content`)。
|
||||
2. 强约束过滤:`query_id/session_id`,避免跨问题污染。
|
||||
3. 元数据:`domain/published_at/fetched_at/language/source_rank`。
|
||||
4. 检索策略:先向量召回,再结合域名可信度做轻量加权。
|
||||
|
||||
## 7. 与 Eino 的集成方式
|
||||
|
||||
1. `embed/eino_embedder.go`:封装 Eino embedding 调用。
|
||||
2. `rerank/eino_reranker.go`:封装 Eino 重排调用。
|
||||
3. 统一配置入口:`rag.enabled/top_k/threshold/reranker_enabled/timeout`。
|
||||
4. 统一日志字段:`trace_id/corpus/action/fallback_reason/latency_ms/hit_count`。
|
||||
|
||||
## 8. 分阶段实施(建议 4 轮)
|
||||
|
||||
### Round 1:基础骨架(不切流)
|
||||
|
||||
1. 建 `infra/rag` 目录与接口、类型、错误码。
|
||||
2. 提供 `NoopReranker`、`MockEmbedder` 兜底实现。
|
||||
3. 验收:编译通过,主链路行为不变。
|
||||
|
||||
### Round 2:MemoryCorpus 接入(灰度)
|
||||
|
||||
1. 把记忆检索从“模块内直连”改为调用 RAG Core。
|
||||
2. 保留旧路径开关 `memory.rag.enabled`,默认关闭。
|
||||
3. 验收:开启开关后功能等价,失败可自动降级旧链路。
|
||||
|
||||
### Round 3:WebCorpus 接入(灰度)
|
||||
|
||||
1. websearch 召回改走 RAG Core。
|
||||
2. 加入 `web.rag.enabled` 灰度开关。
|
||||
3. 验收:检索可复用同一 pipeline,质量不低于旧实现。
|
||||
|
||||
### Round 4:统一切流与清理
|
||||
|
||||
1. 默认开启 RAG Core,旧链路保留一段观察窗口。
|
||||
2. 指标稳定后删除旧实现。
|
||||
3. 验收:两条业务链路均通过统一接口,文档与监控齐全。
|
||||
|
||||
## 9. 配置建议
|
||||
|
||||
```yaml
|
||||
rag:
|
||||
enabled: true
|
||||
topK: 8
|
||||
threshold: 0.55
|
||||
reranker:
|
||||
enabled: true
|
||||
timeoutMs: 1200
|
||||
ingest:
|
||||
chunkSize: 400
|
||||
chunkOverlap: 80
|
||||
retrieve:
|
||||
timeoutMs: 1500
|
||||
```
|
||||
|
||||
## 10. 验收标准(DoD)
|
||||
|
||||
1. 同一套 Core 能同时服务 Memory 与 WebSearch。
|
||||
2. `rerank` 异常时可观测地降级,不影响主功能可用性。
|
||||
3. 支持按 corpus 维度查看命中率、耗时、降级率。
|
||||
4. 新老链路可开关切换,回滚路径明确。
|
||||
|
||||
## 11. 风险与应对
|
||||
|
||||
1. 风险:一次性切流影响面大。
|
||||
应对:按 corpus 分轮灰度,先 Memory 后 Web。
|
||||
2. 风险:向量检索延迟波动。
|
||||
应对:超时控制 + fallback + 本地缓存热点 query。
|
||||
3. 风险:跨域检索串数据。
|
||||
应对:强制 filter 校验,不满足维度直接拒绝检索。
|
||||
|
||||
## 12. 下一步执行清单(紧接实现)
|
||||
|
||||
1. 先补 `core/interfaces.go + core/types.go + core/pipeline.go`。
|
||||
2. 再补 `corpus/memory_corpus.go`(首个适配器)。
|
||||
3. 然后给 websearch 接 `corpus/web_corpus.go` 占位适配器。
|
||||
4. 最后补 `store/milvus_store.go` 与配置接线(当前 docker compose 已准备 Milvus 依赖)。
|
||||
@@ -1,164 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
type CourseService struct {
|
||||
// 伸出手:准备接住 DAO
|
||||
courseDAO *dao.CourseDAO
|
||||
scheduleDAO *dao.ScheduleDAO
|
||||
courseImageResponsesClient *llmservice.ArkResponsesClient
|
||||
courseImageConfig CourseImageParseConfig
|
||||
courseImageModel string
|
||||
}
|
||||
|
||||
// NewCourseService 创建 CourseService 实例
|
||||
func NewCourseService(
|
||||
courseDAO *dao.CourseDAO,
|
||||
scheduleDAO *dao.ScheduleDAO,
|
||||
courseImageResponsesClient *llmservice.ArkResponsesClient,
|
||||
courseImageConfig CourseImageParseConfig,
|
||||
courseImageModel string,
|
||||
) *CourseService {
|
||||
return &CourseService{
|
||||
courseDAO: courseDAO,
|
||||
scheduleDAO: scheduleDAO,
|
||||
courseImageResponsesClient: courseImageResponsesClient,
|
||||
courseImageConfig: courseImageConfig,
|
||||
courseImageModel: strings.TrimSpace(courseImageModel),
|
||||
}
|
||||
}
|
||||
|
||||
func isUniqueViolation(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
// 兼容常见 MySQL / PostgreSQL / SQLite 的报错关键字
|
||||
// 也可以进一步精确到你的索引名 idx_user_slot_atomic
|
||||
msg := strings.ToLower(err.Error())
|
||||
if strings.Contains(msg, "duplicate entry") ||
|
||||
strings.Contains(msg, "unique constraint") ||
|
||||
strings.Contains(msg, "unique violation") ||
|
||||
strings.Contains(msg, "duplicate key") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CheckSingleCourse(req model.UserCheckCourseRequest) bool {
|
||||
for _, arrangement := range req.Arrangements {
|
||||
if arrangement.StartWeek > arrangement.EndWeek ||
|
||||
arrangement.DayOfWeek < 1 || arrangement.DayOfWeek > 7 ||
|
||||
arrangement.StartSection < 1 || arrangement.EndSection < arrangement.StartSection ||
|
||||
arrangement.EndSection > 12 || arrangement.StartWeek < 1 || arrangement.EndWeek > 24 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AddUserCourses 添加用户课程表
|
||||
func (ss *CourseService) AddUserCourses(ctx context.Context, req model.UserImportCoursesRequest, userID int) ([]model.ScheduleConflictDetail, error) {
|
||||
//1.先校验参数是否正确
|
||||
for _, course := range req.Courses {
|
||||
result := CheckSingleCourse(course)
|
||||
if !result {
|
||||
return nil, respond.WrongCourseInfo
|
||||
}
|
||||
}
|
||||
//2.将前端传来的课程信息转换为 Schedule 和 ScheduleEvent 切片
|
||||
var finalSchedules []model.Schedule
|
||||
var finalScheduleEvents []model.ScheduleEvent
|
||||
var pos []int
|
||||
for _, course := range req.Courses {
|
||||
// 避免取 range 迭代变量字段地址导致指针复用问题
|
||||
location := course.Location
|
||||
for _, arrangement := range course.Arrangements {
|
||||
weekType := arrangement.WeekType
|
||||
for week := arrangement.StartWeek; week <= arrangement.EndWeek; week++ {
|
||||
if weekType == "odd" && week%2 == 0 {
|
||||
continue
|
||||
}
|
||||
if weekType == "even" && week%2 != 0 {
|
||||
continue
|
||||
}
|
||||
//2.转换为 Schedule_event 切片
|
||||
st, ed, err := conv.RelativeTimeToRealTime(week, arrangement.DayOfWeek, arrangement.StartSection, arrangement.EndSection)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
scheduleEvent := model.ScheduleEvent{
|
||||
UserID: userID,
|
||||
Name: course.CourseName,
|
||||
Location: &location,
|
||||
Type: "course",
|
||||
RelID: nil,
|
||||
CanBeEmbedded: course.IsAllowTasks,
|
||||
StartTime: st,
|
||||
EndTime: ed,
|
||||
}
|
||||
finalScheduleEvents = append(finalScheduleEvents, scheduleEvent)
|
||||
//3.转换为 Schedule 切片
|
||||
for section := arrangement.StartSection; section <= arrangement.EndSection; section++ {
|
||||
schedule := model.Schedule{
|
||||
Week: week,
|
||||
DayOfWeek: arrangement.DayOfWeek,
|
||||
Section: section,
|
||||
Status: "normal",
|
||||
UserID: userID,
|
||||
EventID: 0,
|
||||
}
|
||||
finalSchedules = append(finalSchedules, schedule)
|
||||
pos = append(pos, len(finalScheduleEvents)-1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
//3.先检测是否重复插入了课程(同一周、同一天、同一节已有课程)
|
||||
exists, err := ss.scheduleDAO.CheckScheduleConflict(ctx, finalSchedules)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if exists {
|
||||
return nil, respond.InsertCourseTwice
|
||||
}
|
||||
//4.再检查是否和某些非课程的日程冲突(同一周、同一天、同一节已有非课程日程),并给出具体的冲突信息
|
||||
conflicts, err := ss.scheduleDAO.GetNonCourseScheduleConflicts(ctx, finalSchedules)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(conflicts) > 0 {
|
||||
ret := conv.SchedulesToScheduleConflictDetail(conflicts)
|
||||
return ret, respond.ScheduleConflict
|
||||
}
|
||||
//5.事务:插入两个表要么都成功,要么都回滚
|
||||
err = ss.courseDAO.Transaction(func(txDAO *dao.CourseDAO) error {
|
||||
ids, err := txDAO.AddUserCoursesIntoScheduleEvents(ctx, finalScheduleEvents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// 将生成的 ScheduleEvent ID 赋值给对应的 Schedule 的 EventID 字段
|
||||
for i := range finalSchedules {
|
||||
finalSchedules[i].EventID = ids[pos[i]]
|
||||
}
|
||||
if err := txDAO.AddUserCoursesIntoSchedule(ctx, finalSchedules); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
if isUniqueViolation(err) {
|
||||
return nil, respond.InsertCourseTwice
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
@@ -1,295 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultCourseImageMaxBytes = 5 * 1024 * 1024
|
||||
defaultCourseImageMaxTokens = 16384
|
||||
maxCourseImageDraftRows = 256
|
||||
courseImageParseTemperature = 0.1
|
||||
)
|
||||
|
||||
var (
|
||||
ErrCourseImageParserUnavailable = errors.New("course image parser is not configured")
|
||||
ErrCourseImageTooLarge = errors.New("course image is too large")
|
||||
ErrCourseImageUnsupportedMIME = errors.New("course image mime type is not supported")
|
||||
ErrCourseImageEmpty = errors.New("course image is empty")
|
||||
)
|
||||
|
||||
type CourseImageParseConfig struct {
|
||||
MaxImageBytes int64
|
||||
MaxTokens int
|
||||
}
|
||||
|
||||
func NewCourseImageParseConfig(maxImageBytes int64, maxTokens int) CourseImageParseConfig {
|
||||
if maxImageBytes <= 0 {
|
||||
maxImageBytes = defaultCourseImageMaxBytes
|
||||
}
|
||||
if maxTokens <= 0 {
|
||||
maxTokens = defaultCourseImageMaxTokens
|
||||
}
|
||||
return CourseImageParseConfig{
|
||||
MaxImageBytes: maxImageBytes,
|
||||
MaxTokens: maxTokens,
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeCourseImageParseRequest(req model.CourseImageParseRequest, cfg CourseImageParseConfig) (*model.CourseImageParseRequest, error) {
|
||||
req.Filename = strings.TrimSpace(req.Filename)
|
||||
req.MIMEType = strings.TrimSpace(strings.ToLower(req.MIMEType))
|
||||
if len(req.ImageBytes) == 0 {
|
||||
return nil, ErrCourseImageEmpty
|
||||
}
|
||||
if int64(len(req.ImageBytes)) > cfg.MaxImageBytes {
|
||||
return nil, ErrCourseImageTooLarge
|
||||
}
|
||||
|
||||
detected := strings.ToLower(strings.TrimSpace(http.DetectContentType(req.ImageBytes)))
|
||||
if req.MIMEType == "" || req.MIMEType == "application/octet-stream" {
|
||||
req.MIMEType = detected
|
||||
}
|
||||
if !isSupportedCourseImageMIME(req.MIMEType) {
|
||||
if isSupportedCourseImageMIME(detected) {
|
||||
req.MIMEType = detected
|
||||
} else {
|
||||
return nil, ErrCourseImageUnsupportedMIME
|
||||
}
|
||||
}
|
||||
|
||||
if req.Filename == "" {
|
||||
req.Filename = "course-table"
|
||||
}
|
||||
return &req, nil
|
||||
}
|
||||
|
||||
func isSupportedCourseImageMIME(mimeType string) bool {
|
||||
switch strings.TrimSpace(strings.ToLower(mimeType)) {
|
||||
case "image/jpeg", "image/png", "image/webp":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeCourseImageParseResponse(resp *model.CourseImageParseResponse) (*model.CourseImageParseResponse, error) {
|
||||
if resp == nil {
|
||||
return nil, errors.New("course image parse response is nil")
|
||||
}
|
||||
|
||||
resp.DraftStatus = model.CourseImageParseDraftStatus(strings.ToLower(strings.TrimSpace(string(resp.DraftStatus))))
|
||||
resp.Message = strings.TrimSpace(resp.Message)
|
||||
resp.Warnings = normalizeWarningList(resp.Warnings)
|
||||
resp.Rows = normalizeCourseImageParseRows(resp.Rows, &resp.Warnings)
|
||||
|
||||
switch resp.DraftStatus {
|
||||
case model.CourseImageParseDraftStatusSuccess:
|
||||
if len(resp.Rows) == 0 {
|
||||
return nil, errors.New("course image parse response has no rows in success status")
|
||||
}
|
||||
for idx := range resp.Rows {
|
||||
if err := validateCourseImageParseRow(&resp.Rows[idx], true); err != nil {
|
||||
return nil, fmt.Errorf("course image parse success row %d invalid: %w", idx+1, err)
|
||||
}
|
||||
}
|
||||
case model.CourseImageParseDraftStatusPartial:
|
||||
if len(resp.Rows) == 0 {
|
||||
return nil, errors.New("course image parse response has no rows in partial status")
|
||||
}
|
||||
for idx := range resp.Rows {
|
||||
if err := validateCourseImageParseRow(&resp.Rows[idx], false); err != nil {
|
||||
return nil, fmt.Errorf("course image parse partial row %d invalid: %w", idx+1, err)
|
||||
}
|
||||
}
|
||||
case model.CourseImageParseDraftStatusReject:
|
||||
resp.Rows = make([]model.CourseImageParseRow, 0)
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported draft_status: %s", resp.DraftStatus)
|
||||
}
|
||||
|
||||
if resp.Message == "" {
|
||||
resp.Message = defaultCourseImageParseMessage(resp.DraftStatus, len(resp.Rows))
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func normalizeCourseImageParseRows(rows []model.CourseImageParseRow, warnings *[]string) []model.CourseImageParseRow {
|
||||
if len(rows) == 0 {
|
||||
return make([]model.CourseImageParseRow, 0)
|
||||
}
|
||||
if len(rows) > maxCourseImageDraftRows {
|
||||
rows = rows[:maxCourseImageDraftRows]
|
||||
appendUniqueWarning(warnings, "识别结果行数超过上限,后端已截断为 256 行,请重点核对。")
|
||||
}
|
||||
|
||||
normalized := make([]model.CourseImageParseRow, 0, len(rows))
|
||||
for idx := range rows {
|
||||
row := rows[idx]
|
||||
row.RowID = strings.TrimSpace(row.RowID)
|
||||
if row.RowID == "" {
|
||||
row.RowID = fmt.Sprintf("row_%03d", idx+1)
|
||||
}
|
||||
row.CourseName = strings.TrimSpace(row.CourseName)
|
||||
row.Location = strings.TrimSpace(row.Location)
|
||||
row.WeekType = normalizeCourseImageWeekType(row.WeekType)
|
||||
row.RawText = strings.TrimSpace(row.RawText)
|
||||
row.RowWarnings = normalizeWarningList(row.RowWarnings)
|
||||
normalizeOptionalPositiveInt(&row.StartWeek)
|
||||
normalizeOptionalPositiveInt(&row.EndWeek)
|
||||
normalizeOptionalPositiveInt(&row.DayOfWeek)
|
||||
normalizeOptionalPositiveInt(&row.StartSection)
|
||||
normalizeOptionalPositiveInt(&row.EndSection)
|
||||
if row.Confidence < 0 {
|
||||
row.Confidence = 0
|
||||
}
|
||||
if row.Confidence > 1 {
|
||||
row.Confidence = 1
|
||||
}
|
||||
if row.CourseName == "" &&
|
||||
row.StartWeek == nil &&
|
||||
row.EndWeek == nil &&
|
||||
row.DayOfWeek == nil &&
|
||||
row.StartSection == nil &&
|
||||
row.EndSection == nil &&
|
||||
row.RawText == "" {
|
||||
appendUniqueWarning(warnings, fmt.Sprintf("存在空白草稿行,后端已自动忽略:%s", row.RowID))
|
||||
continue
|
||||
}
|
||||
normalized = append(normalized, row)
|
||||
}
|
||||
|
||||
return normalized
|
||||
}
|
||||
|
||||
func validateCourseImageParseRow(row *model.CourseImageParseRow, strict bool) error {
|
||||
if row == nil {
|
||||
return errors.New("row is nil")
|
||||
}
|
||||
if strict && row.CourseName == "" {
|
||||
return errors.New("course_name is empty")
|
||||
}
|
||||
if strict && row.WeekType == "" {
|
||||
return errors.New("week_type is empty")
|
||||
}
|
||||
if row.WeekType != "" && row.WeekType != "all" && row.WeekType != "odd" && row.WeekType != "even" {
|
||||
return fmt.Errorf("week_type is invalid: %s", row.WeekType)
|
||||
}
|
||||
|
||||
if err := validateOptionalCourseIntPair(row.StartWeek, row.EndWeek, 1, 24, "week", strict); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateOptionalCourseIntPair(row.StartSection, row.EndSection, 1, 12, "section", strict); err != nil {
|
||||
return err
|
||||
}
|
||||
if strict && row.DayOfWeek == nil {
|
||||
return errors.New("day_of_week is empty")
|
||||
}
|
||||
if row.DayOfWeek != nil && (*row.DayOfWeek < 1 || *row.DayOfWeek > 7) {
|
||||
return fmt.Errorf("day_of_week out of range: %d", *row.DayOfWeek)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func validateOptionalCourseIntPair(start *int, end *int, min int, max int, field string, strict bool) error {
|
||||
if strict {
|
||||
if start == nil || end == nil {
|
||||
return fmt.Errorf("%s range is incomplete", field)
|
||||
}
|
||||
}
|
||||
if start == nil && end == nil {
|
||||
return nil
|
||||
}
|
||||
if start == nil || end == nil {
|
||||
return fmt.Errorf("%s range is incomplete", field)
|
||||
}
|
||||
if *start < min || *start > max {
|
||||
return fmt.Errorf("%s start out of range: %d", field, *start)
|
||||
}
|
||||
if *end < min || *end > max {
|
||||
return fmt.Errorf("%s end out of range: %d", field, *end)
|
||||
}
|
||||
if *start > *end {
|
||||
return fmt.Errorf("%s start is greater than end: %d > %d", field, *start, *end)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func normalizeOptionalPositiveInt(target **int) {
|
||||
if target == nil || *target == nil {
|
||||
return
|
||||
}
|
||||
if **target <= 0 {
|
||||
*target = nil
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeCourseImageWeekType(raw string) string {
|
||||
normalized := strings.ToLower(strings.TrimSpace(raw))
|
||||
switch normalized {
|
||||
case "", "unknown", "null":
|
||||
return ""
|
||||
case "all", "every", "weekly", "each week", "每周", "全周", "全部":
|
||||
return "all"
|
||||
case "odd", "single", "单", "单周":
|
||||
return "odd"
|
||||
case "even", "double", "双", "双周":
|
||||
return "even"
|
||||
default:
|
||||
return normalized
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeWarningList(items []string) []string {
|
||||
if len(items) == 0 {
|
||||
return make([]string, 0)
|
||||
}
|
||||
seen := make(map[string]struct{}, len(items))
|
||||
result := make([]string, 0, len(items))
|
||||
for _, item := range items {
|
||||
trimmed := strings.TrimSpace(item)
|
||||
if trimmed == "" {
|
||||
continue
|
||||
}
|
||||
if _, ok := seen[trimmed]; ok {
|
||||
continue
|
||||
}
|
||||
seen[trimmed] = struct{}{}
|
||||
result = append(result, trimmed)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func appendUniqueWarning(target *[]string, warningText string) {
|
||||
if target == nil {
|
||||
return
|
||||
}
|
||||
trimmed := strings.TrimSpace(warningText)
|
||||
if trimmed == "" {
|
||||
return
|
||||
}
|
||||
for _, existing := range *target {
|
||||
if strings.TrimSpace(existing) == trimmed {
|
||||
return
|
||||
}
|
||||
}
|
||||
*target = append(*target, trimmed)
|
||||
}
|
||||
|
||||
func defaultCourseImageParseMessage(status model.CourseImageParseDraftStatus, rowCount int) string {
|
||||
switch status {
|
||||
case model.CourseImageParseDraftStatusSuccess:
|
||||
return fmt.Sprintf("已识别 %d 条课程安排,请重点核对周次、星期和节次。", rowCount)
|
||||
case model.CourseImageParseDraftStatusPartial:
|
||||
return fmt.Sprintf("已识别 %d 条课程安排,但仍存在不确定字段,请结合 warning 逐项核对。", rowCount)
|
||||
case model.CourseImageParseDraftStatusReject:
|
||||
return "图片信息不足,建议重新上传完整、清晰、包含表头和节次栏的总课表截图。"
|
||||
default:
|
||||
return "课程表图片识别已完成,请人工核对后再导入。"
|
||||
}
|
||||
}
|
||||
@@ -1,228 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
// ParseCourseTableImage 使用 Ark SDK Responses 解析课程表图片。
|
||||
func (ss *CourseService) ParseCourseTableImage(ctx context.Context, req model.CourseImageParseRequest) (*model.CourseImageParseResponse, error) {
|
||||
if ss == nil || ss.courseImageResponsesClient == nil {
|
||||
modelName := ""
|
||||
if ss != nil {
|
||||
modelName = ss.courseImageModel
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] parser unavailable model_name=%q filename=%q mime=%q bytes=%d",
|
||||
modelName,
|
||||
req.Filename,
|
||||
req.MIMEType,
|
||||
len(req.ImageBytes),
|
||||
)
|
||||
return nil, ErrCourseImageParserUnavailable
|
||||
}
|
||||
|
||||
normalizedReq, err := normalizeCourseImageParseRequest(req, ss.courseImageConfig)
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] request normalization failed filename=%q mime=%q bytes=%d err=%v",
|
||||
req.Filename,
|
||||
req.MIMEType,
|
||||
len(req.ImageBytes),
|
||||
err,
|
||||
)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] normalized request model_name=%q filename=%q mime=%q bytes=%d max_bytes=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
len(normalizedReq.ImageBytes),
|
||||
ss.courseImageConfig.MaxImageBytes,
|
||||
)
|
||||
|
||||
messages, base64Chars, promptChars := buildCourseImageParseResponsesMessages(normalizedReq)
|
||||
startAt := time.Now()
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke start model_name=%q filename=%q mime=%q message_count=%d base64_chars=%d prompt_chars=%d payload_chars_estimate=%d thinking=%s temperature=%.2f max_output_tokens=%d text_format=%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
len(messages),
|
||||
base64Chars,
|
||||
promptChars,
|
||||
base64Chars+promptChars+len(strings.TrimSpace(courseImageParseSystemPrompt)),
|
||||
llmservice.ThinkingModeDisabled,
|
||||
courseImageParseTemperature,
|
||||
ss.courseImageConfig.MaxTokens,
|
||||
"json_object",
|
||||
)
|
||||
|
||||
// 1. 课程表图片识别输出体量大,显式透传 max_output_tokens,避免被默认值截断。
|
||||
// 2. text_format 固定为 json_object,降低输出混入解释文本导致解析失败的概率。
|
||||
// 3. thinking 显式关闭,优先保证课程导入链路稳定性。
|
||||
draft, rawResult, err := llmservice.GenerateArkResponsesJSON[model.CourseImageParseResponse](ctx, ss.courseImageResponsesClient, messages, llmservice.ArkResponsesOptions{
|
||||
Temperature: courseImageParseTemperature,
|
||||
MaxOutputTokens: ss.courseImageConfig.MaxTokens,
|
||||
Thinking: llmservice.ThinkingModeDisabled,
|
||||
TextFormat: "json_object",
|
||||
})
|
||||
if err != nil {
|
||||
rawText := ""
|
||||
rawChars := 0
|
||||
status := ""
|
||||
incompleteReason := ""
|
||||
errorCode := ""
|
||||
errorMessage := ""
|
||||
inputTokens := int64(0)
|
||||
outputTokens := int64(0)
|
||||
totalTokens := int64(0)
|
||||
if rawResult != nil {
|
||||
rawText = strings.TrimSpace(rawResult.Text)
|
||||
rawChars = len(rawText)
|
||||
status = strings.TrimSpace(rawResult.Status)
|
||||
incompleteReason = strings.TrimSpace(rawResult.IncompleteReason)
|
||||
errorCode = strings.TrimSpace(rawResult.ErrorCode)
|
||||
errorMessage = strings.TrimSpace(rawResult.ErrorMessage)
|
||||
if rawResult.Usage != nil {
|
||||
inputTokens = rawResult.Usage.InputTokens
|
||||
outputTokens = rawResult.Usage.OutputTokens
|
||||
totalTokens = rawResult.Usage.TotalTokens
|
||||
}
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke failed model_name=%q filename=%q mime=%q cost_ms=%d err=%v status=%q incomplete_reason=%q error_code=%q error_message=%q input_tokens=%d output_tokens=%d total_tokens=%d raw_chars=%d raw_full=\n%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
time.Since(startAt).Milliseconds(),
|
||||
err,
|
||||
status,
|
||||
incompleteReason,
|
||||
errorCode,
|
||||
errorMessage,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
totalTokens,
|
||||
rawChars,
|
||||
rawText,
|
||||
)
|
||||
if isCourseImageOutputTruncated(rawResult) {
|
||||
return nil, fmt.Errorf(
|
||||
"课程表识别输出疑似被 max_output_tokens 截断:status=%s incomplete_reason=%s output_tokens=%d max_output_tokens=%d",
|
||||
status,
|
||||
incompleteReason,
|
||||
outputTokens,
|
||||
ss.courseImageConfig.MaxTokens,
|
||||
)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawText := ""
|
||||
rawChars := 0
|
||||
status := ""
|
||||
incompleteReason := ""
|
||||
errorCode := ""
|
||||
errorMessage := ""
|
||||
inputTokens := int64(0)
|
||||
outputTokens := int64(0)
|
||||
totalTokens := int64(0)
|
||||
if rawResult != nil {
|
||||
rawText = strings.TrimSpace(rawResult.Text)
|
||||
rawChars = len(rawText)
|
||||
status = strings.TrimSpace(rawResult.Status)
|
||||
incompleteReason = strings.TrimSpace(rawResult.IncompleteReason)
|
||||
errorCode = strings.TrimSpace(rawResult.ErrorCode)
|
||||
errorMessage = strings.TrimSpace(rawResult.ErrorMessage)
|
||||
if rawResult.Usage != nil {
|
||||
inputTokens = rawResult.Usage.InputTokens
|
||||
outputTokens = rawResult.Usage.OutputTokens
|
||||
totalTokens = rawResult.Usage.TotalTokens
|
||||
}
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke success model_name=%q filename=%q mime=%q cost_ms=%d status=%q incomplete_reason=%q error_code=%q error_message=%q input_tokens=%d output_tokens=%d total_tokens=%d raw_chars=%d raw_full=\n%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
time.Since(startAt).Milliseconds(),
|
||||
status,
|
||||
incompleteReason,
|
||||
errorCode,
|
||||
errorMessage,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
totalTokens,
|
||||
rawChars,
|
||||
rawText,
|
||||
)
|
||||
|
||||
normalizedDraft, err := normalizeCourseImageParseResponse(draft)
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] draft normalization failed model_name=%q filename=%q err=%v draft_status=%v row_count=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
err,
|
||||
draft.DraftStatus,
|
||||
len(draft.Rows),
|
||||
)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] draft normalization success model_name=%q filename=%q draft_status=%s rows=%d warnings=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedDraft.DraftStatus,
|
||||
len(normalizedDraft.Rows),
|
||||
len(normalizedDraft.Warnings),
|
||||
)
|
||||
|
||||
return normalizedDraft, nil
|
||||
}
|
||||
|
||||
func buildCourseImageParseResponsesMessages(req *model.CourseImageParseRequest) ([]llmservice.ArkResponsesMessage, int, int) {
|
||||
userPrompt := fmt.Sprintf(courseImageParseUserPromptTemplate, req.Filename, req.MIMEType)
|
||||
base64Data := base64.StdEncoding.EncodeToString(req.ImageBytes)
|
||||
imageDataURL := fmt.Sprintf("data:%s;base64,%s", req.MIMEType, base64Data)
|
||||
|
||||
messages := []llmservice.ArkResponsesMessage{
|
||||
{
|
||||
Role: "system",
|
||||
Text: strings.TrimSpace(courseImageParseSystemPrompt),
|
||||
},
|
||||
{
|
||||
Role: "user",
|
||||
Text: strings.TrimSpace(userPrompt),
|
||||
ImageURL: imageDataURL,
|
||||
ImageDetail: "high",
|
||||
},
|
||||
}
|
||||
return messages, len(base64Data), len(strings.TrimSpace(userPrompt))
|
||||
}
|
||||
|
||||
func isCourseImageOutputTruncated(rawResult *llmservice.ArkResponsesResult) bool {
|
||||
if rawResult == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
reason := strings.ToLower(strings.TrimSpace(rawResult.IncompleteReason))
|
||||
if strings.Contains(reason, "max_output_tokens") ||
|
||||
strings.Contains(reason, "max_tokens") ||
|
||||
strings.Contains(reason, "length") {
|
||||
return true
|
||||
}
|
||||
|
||||
return strings.EqualFold(strings.TrimSpace(rawResult.Status), "incomplete") && reason == ""
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package service
|
||||
|
||||
const courseImageParseSystemPrompt = `
|
||||
你是 SmartFlow 的“总课表图片识别器”。你的唯一任务是读取用户上传的总课表图片,输出结构化 JSON 草稿,供前端人工核对后再导入系统。
|
||||
|
||||
必须遵守以下规则:
|
||||
1. 只能输出一个 JSON 对象,禁止输出 Markdown、代码块、解释文字或额外前后缀。
|
||||
2. 顶层 JSON 结构必须是:
|
||||
{
|
||||
"draft_status": "success | partial | reject",
|
||||
"message": "字符串",
|
||||
"warnings": ["字符串"],
|
||||
"rows": [
|
||||
{
|
||||
"row_id": "字符串,可为空",
|
||||
"course_name": "字符串",
|
||||
"location": "字符串",
|
||||
"is_allow_tasks": false,
|
||||
"start_week": 1,
|
||||
"end_week": 16,
|
||||
"day_of_week": 1,
|
||||
"start_section": 1,
|
||||
"end_section": 2,
|
||||
"week_type": "all | odd | even",
|
||||
"confidence": 0.92,
|
||||
"raw_text": "原图中对应的近似文本",
|
||||
"row_warnings": ["字符串"]
|
||||
}
|
||||
]
|
||||
}
|
||||
3. rows 中一行只表达一个“课程安排片段”,不要把同一门课的多个时间段强行合并成一行。
|
||||
4. is_allow_tasks 无法从课表图片稳定识别时,一律返回 false,不要自行猜测。
|
||||
5. 若图片完整且大部分字段明确,可返回 success。
|
||||
6. 若图片可识别出部分行,但存在裁切、模糊、遮挡、单双周不清晰、节次/周次不确定等问题,返回 partial。
|
||||
7. 若图片严重不完整、分辨率过低、主体不是课表、无法可靠识别,返回 reject,同时 rows 置为空数组。
|
||||
8. 不要编造信息。看不清的数值字段请返回 null,并在 row_warnings 或 warnings 中明确说明原因。
|
||||
9. week_type 只能是:
|
||||
- all:每周/未标注单双周
|
||||
- odd:单周
|
||||
- even:双周
|
||||
10. day_of_week 使用 1-7 表示周一到周日。
|
||||
11. start_section/end_section 使用原子节次编号,例如 1-2 节应输出 start_section=1, end_section=2。
|
||||
12. confidence 取 0 到 1 之间的小数;不确定时可以偏保守。
|
||||
13. 如果 rows 不为空,优先保证“周次、星期、节次”准确,地点可为空字符串。
|
||||
14. 当图片信息不足时,应明确拒绝或降级为 partial,而不是强行补全。
|
||||
15. 填写json中course_name时,严格按照截图的课程名称来。例如,有的课可能既有本体,又有实验课,这算是两门不同的课。
|
||||
16. 周信息是可能出现中断的,例如一节课可能是第1周和第6-12周,这是正常的课程安排,请不要擅自更改。
|
||||
`
|
||||
|
||||
const courseImageParseUserPromptTemplate = `
|
||||
请识别这张总课表图片,并严格按照约定 JSON 输出草稿。
|
||||
|
||||
补充约束:
|
||||
1. 文件名:%s
|
||||
2. MIME 类型:%s
|
||||
3. 这是一张供学生核对的“导入草稿”,不是最终真值;不确定就留空或写 warning。
|
||||
4. 如果图片右侧、底部、表头、周次栏、节次栏有缺失,请优先返回 partial 或 reject。
|
||||
5. rows 里尽量保留 raw_text,方便前端逐行回显核对。
|
||||
`
|
||||
@@ -1,866 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/logic"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
type ScheduleService struct {
|
||||
scheduleDAO *dao.ScheduleDAO
|
||||
taskClassDAO *dao.TaskClassDAO
|
||||
repoManager *dao.RepoManager // 统一管理多个 DAO 的事务
|
||||
cacheDAO *dao.CacheDAO // 需要在 ScheduleService 中使用缓存
|
||||
}
|
||||
|
||||
func NewScheduleService(scheduleDAO *dao.ScheduleDAO, taskClassDAO *dao.TaskClassDAO, repoManager *dao.RepoManager, cacheDAO *dao.CacheDAO) *ScheduleService {
|
||||
return &ScheduleService{
|
||||
scheduleDAO: scheduleDAO,
|
||||
taskClassDAO: taskClassDAO,
|
||||
repoManager: repoManager,
|
||||
cacheDAO: cacheDAO,
|
||||
}
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserTodaySchedule(ctx context.Context, userID int) ([]model.UserTodaySchedule, error) {
|
||||
//1.先尝试从缓存获取数据
|
||||
cachedResp, err := ss.cacheDAO.GetUserTodayScheduleFromCache(ctx, userID)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//2.获取当前日期
|
||||
/*curTime := time.Now().Format("2006-01-02")*/
|
||||
curTime := "2026-03-02" //测试数据
|
||||
week, dayOfWeek, err := conv.RealDateToRelativeDate(curTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.查询用户当天的日程安排
|
||||
schedules, err := ss.scheduleDAO.GetUserTodaySchedule(ctx, userID, week, dayOfWeek) //测试数据
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//4.转换为前端需要的格式
|
||||
todaySchedules := conv.SchedulesToUserTodaySchedule(schedules)
|
||||
//5.将查询结果存入缓存,设置过期时间为当天结束
|
||||
err = ss.cacheDAO.SetUserTodayScheduleToCache(ctx, userID, todaySchedules)
|
||||
return todaySchedules, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserWeeklySchedule(ctx context.Context, userID, week int) (*model.UserWeekSchedule, error) {
|
||||
//1.先检查 week 参数是否合法
|
||||
if week < 0 || week > 25 {
|
||||
return nil, respond.WeekOutOfRange
|
||||
}
|
||||
//2.先看看缓存里有没有数据(如果有的话直接返回,没有的话继续查库)
|
||||
cachedResp, err := ss.cacheDAO.GetUserWeeklyScheduleFromCache(ctx, userID, week)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//3.查询用户每周的日程安排
|
||||
//如果没有传入 week 参数,则默认查询当前周的日程安排
|
||||
if week == 0 {
|
||||
curTime := time.Now().Format("2006-01-02")
|
||||
var err error
|
||||
week, _, err = conv.RealDateToRelativeDate(curTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
schedules, err := ss.scheduleDAO.GetUserWeeklySchedule(ctx, userID, week)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.转换为前端需要的格式
|
||||
weeklySchedule := conv.SchedulesToUserWeeklySchedule(schedules)
|
||||
weeklySchedule.Week = week
|
||||
//4.将查询结果存入缓存,设置过期时间为一周(或者根据实际情况调整)
|
||||
err = ss.cacheDAO.SetUserWeeklyScheduleToCache(ctx, userID, weeklySchedule)
|
||||
return weeklySchedule, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) DeleteScheduleEvent(ctx context.Context, requests []model.UserDeleteScheduleEvent, userID int) error {
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
for _, req := range requests {
|
||||
//1.如果要删课程和嵌入的事件
|
||||
if req.DeleteEmbeddedTask && req.DeleteCourse {
|
||||
//通过schedule表的embedded_task_id字段找到对应的task_id
|
||||
taskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
if taskID != 0 {
|
||||
err = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//再删除课程事件和嵌入的事件(通过级联删除实现)
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
//2.只删课程/事件
|
||||
if req.DeleteCourse {
|
||||
//2.1.检查课程是否有嵌入的任务事件
|
||||
exists, err := txM.Schedule.IfScheduleEventIDExists(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return respond.WrongScheduleEventID
|
||||
}
|
||||
embeddedTaskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//2.2.如果有,则需另外为其创建新的scheduleEvent(type=task)
|
||||
//课程事件先删除后再创建任务事件
|
||||
if embeddedTaskID != 0 {
|
||||
//2.2.1.先通过id取出taskClassItem详情
|
||||
taskClassItem, err := txM.TaskClass.GetTaskClassItemByID(ctx, embeddedTaskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//下方开启事务,删除课程事件并创建新的任务事件
|
||||
//2.2.2.删除课程事件
|
||||
txErr := txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//2.2.3.再复用代码创建新的scheduleEvent,下方代码改编自AddTaskClassItemIntoSchedule函数
|
||||
//直接构造Schedule模型
|
||||
sections := make([]int, 0, taskClassItem.EmbeddedTime.SectionTo-taskClassItem.EmbeddedTime.SectionFrom+1)
|
||||
// 这里的 req 主要是为了传递 Week 和 DayOfWeek,其他字段不需要了
|
||||
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(
|
||||
&model.UserInsertTaskClassItemToScheduleRequest{
|
||||
Week: taskClassItem.EmbeddedTime.Week,
|
||||
DayOfWeek: taskClassItem.EmbeddedTime.DayOfWeek},
|
||||
taskClassItem, nil, userID, taskClassItem.EmbeddedTime.SectionFrom, taskClassItem.EmbeddedTime.SectionTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//将节次区间转换为节次切片,方便后续检查冲突
|
||||
for section := taskClassItem.EmbeddedTime.SectionFrom; section <= taskClassItem.EmbeddedTime.SectionTo; section++ {
|
||||
sections = append(sections, section)
|
||||
}
|
||||
//单用户不存在删除时这个格子被占用的情况,所以不检查冲突了
|
||||
/*//4.1 统一检查冲突(避免逐条查库)
|
||||
conflict, err := ss.scheduleDAO.HasUserScheduleConflict(ctx, userID, req.Week, req.DayOfWeek, sections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if conflict {
|
||||
return respond.ScheduleConflict
|
||||
}*/
|
||||
// 5. 写入数据库(通过 RepoManager 统一管理事务)
|
||||
// 这里的 sv.daoManager 是你在初始化 Service 时注入的全局 RepoManager 实例
|
||||
// 5.1 使用事务中的 ScheduleRepo 插入 Event
|
||||
eventID, txErr := txM.Schedule.AddScheduleEvent(scheduleEvent)
|
||||
if txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
// 5.2 关联 ID(纯内存操作,无需 tx)
|
||||
for i := range schedules {
|
||||
schedules[i].EventID = eventID
|
||||
}
|
||||
// 5.3 使用事务中的 ScheduleRepo 批量插入原子槽位
|
||||
if _, txErr = txM.Schedule.AddSchedules(schedules); txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
// 5.4 使用事务中的 TaskRepo 更新任务状态
|
||||
if txErr = txM.TaskClass.UpdateTaskClassItemEmbeddedTime(ctx, embeddedTaskID, taskClassItem.EmbeddedTime); txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
continue
|
||||
}
|
||||
//2.3.如果没有嵌入的事件,就直接删除课程事件
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//先通过rel_id找到对应的task_id
|
||||
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, req.ID)
|
||||
if txErr != nil {
|
||||
return err
|
||||
}
|
||||
//2.4.如果是任务块,转而去清除task_items表中的嵌入时间
|
||||
if taskID != 0 {
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
//3.只删嵌入的事件
|
||||
if req.DeleteEmbeddedTask {
|
||||
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, req.ID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.2.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserRecentCompletedSchedules(ctx context.Context, userID, index, limit int) (*model.UserRecentCompletedScheduleResponse, error) {
|
||||
//1.先查缓存
|
||||
cachedResp, err := ss.cacheDAO.GetUserRecentCompletedSchedulesFromCache(ctx, userID, index, limit)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//2.查询用户最近完成的日程安排
|
||||
//获取现在的时间
|
||||
/*nowTime := time.Now()*/
|
||||
nowTime := time.Date(2026, 6, 30, 12, 0, 0, 0, time.Local) //测试数据
|
||||
schedules, err := ss.scheduleDAO.GetUserRecentCompletedSchedules(ctx, nowTime, userID, index, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.转换为前端需要的格式
|
||||
result := conv.SchedulesToRecentCompletedSchedules(schedules)
|
||||
//4.将查询结果存入缓存,设置过期时间为30分钟(根据实际情况调整)
|
||||
err = ss.cacheDAO.SetUserRecentCompletedSchedulesToCache(ctx, userID, index, limit, result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserOngoingSchedule(ctx context.Context, userID int) (*model.OngoingSchedule, error) {
|
||||
//1.先查缓存
|
||||
cachedResp, err := ss.cacheDAO.GetUserOngoingScheduleFromCache(ctx, userID)
|
||||
if err == nil && cachedResp == nil {
|
||||
// 之前缓存过没有正在进行的日程,直接返回 nil
|
||||
return nil, respond.NoOngoingOrUpcomingSchedule
|
||||
}
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//2.查询用户正在进行的日程安排
|
||||
/*nowTime := time.Now()*/
|
||||
nowTime := time.Date(2026, 6, 30, 18, 50, 0, 0, time.Local) //测试数据
|
||||
schedules, err := ss.scheduleDAO.GetUserOngoingSchedule(ctx, userID, nowTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.转换为前端需要的格式
|
||||
result := conv.SchedulesToUserOngoingSchedule(schedules)
|
||||
if result != nil {
|
||||
if result.StartTime.After(nowTime) {
|
||||
result.TimeStatus = "upcoming"
|
||||
} else {
|
||||
result.TimeStatus = "ongoing"
|
||||
}
|
||||
}
|
||||
//4.将查询结果存入缓存,设置过期时间直到此任务结束(根据实际情况调整)
|
||||
err = ss.cacheDAO.SetUserOngoingScheduleToCache(ctx, userID, result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if result == nil {
|
||||
// 没有正在进行或即将开始的日程,返回特定错误
|
||||
return nil, respond.NoOngoingOrUpcomingSchedule
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) RevocateUserTaskClassItem(ctx context.Context, userID, eventID int) error {
|
||||
//1.先查库,看看这个event是任务事件还是课程事件,以及判断它是否属于用户
|
||||
eventType, err := ss.scheduleDAO.GetScheduleTypeByEventID(ctx, eventID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//2.根据查询结果进行不同的撤销操作
|
||||
if eventType == "course" {
|
||||
//下面开启事务,撤销嵌入事件
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.2.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.3.最后设置task_items表的status字段为已撤销
|
||||
txErr = txM.Schedule.RevocateSchedulesByEventID(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if eventType == "task" {
|
||||
//下面开启事务,撤销任务事件
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//先通过rel_id找到对应的task_id
|
||||
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return err
|
||||
}
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//最后将其从日程表中删除(通过级联删除实现)
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, eventID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Println("ScheduleService.RevocateUserTaskClassItem: eventType is neither embedded_task nor task, something must be wrong")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) SmartPlanning(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, error) {
|
||||
//1.通过任务类id获取任务类详情
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//2.校验任务类的参数是否合法
|
||||
if taskClass == nil {
|
||||
return nil, respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
//3.获取任务类安排的时间范围内的全部周数信息(左右边界不足一周的情况也要算作一周)
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//4.将多个周的信息传入智能排课算法,获取推荐的时间安排(周+周内的天+节次)
|
||||
result, err := logic.SmartPlanningMainLogic(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//5.将推荐的时间安排转换为前端需要的格式返回
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SmartPlanningRaw 执行粗排算法并同时返回展示结构和已分配的任务项。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 与 SmartPlanning 共享完全相同的前置校验和粗排逻辑;
|
||||
// 2. 额外返回 allocatedItems(每项的 EmbeddedTime 已由算法回填),
|
||||
// 供 Agent 排程链路直接转换为 BatchApplyPlans 请求,无需再让模型"二次分配"。
|
||||
func (ss *ScheduleService) SmartPlanningRaw(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||
// 1. 获取任务类详情。
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if taskClass == nil {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return nil, nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
|
||||
// 2. 获取时间范围内的全部日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 执行粗排算法,拿到已分配的 items(EmbeddedTime 已回填)。
|
||||
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 同时生成展示结构,供 SSE 阶段推送给前端预览。
|
||||
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||
return displayResult, allocatedItems, nil
|
||||
}
|
||||
|
||||
// SmartPlanningMulti 执行“多任务类智能粗排”,仅返回前端展示结构。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责把多任务类请求收口到统一粗排流程;
|
||||
// 2. 负责返回展示结构;
|
||||
// 3. 不返回底层分配细节(由 SmartPlanningMultiRaw 提供)。
|
||||
func (ss *ScheduleService) SmartPlanningMulti(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, error) {
|
||||
displayResult, _, err := ss.SmartPlanningMultiRaw(ctx, userID, taskClassIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return displayResult, nil
|
||||
}
|
||||
|
||||
// SmartPlanningMultiRaw 执行“多任务类智能粗排”,同时返回展示结构和已分配任务项。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责多任务类请求的完整前置处理(归一化/校验/排序/时间窗收敛);
|
||||
// 2. 负责调用多任务类粗排主逻辑(共享资源池);
|
||||
// 3. 只计算建议,不负责落库。
|
||||
func (ss *ScheduleService) SmartPlanningMultiRaw(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||
// 1. 输入归一化。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 批量读取完整任务类(含 Items)。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 校验任务类并计算全局时间窗。
|
||||
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 拉取全局时间窗内的既有日程底板。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx,
|
||||
userID,
|
||||
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 5. 执行多任务类粗排(共享资源池 + 增量占位)。
|
||||
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 6. 转换前端展示结构。
|
||||
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||
return displayResult, allocatedItems, nil
|
||||
}
|
||||
|
||||
// ResolvePlanningWindowByTaskClasses 解析“多任务类排程窗口”的相对周/天边界。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责根据 task_class_ids 计算全局起止日期并转换成相对周/天;
|
||||
// 2. 不执行粗排、不查询课表、不生成 HybridEntries;
|
||||
// 3. 供 Agent 周级 Move 工具做硬边界校验,防止越界移动。
|
||||
//
|
||||
// 返回语义:
|
||||
// 1. startWeek/startDay:允许排程的起点(含);
|
||||
// 2. endWeek/endDay:允许排程的终点(含);
|
||||
// 3. error:任何校验或日期转换失败都返回错误。
|
||||
func (ss *ScheduleService) ResolvePlanningWindowByTaskClasses(ctx context.Context, userID int, taskClassIDs []int) (int, int, int, int, error) {
|
||||
// 1. 输入归一化:过滤非法值并去重。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return 0, 0, 0, 0, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 批量查询任务类并复用统一校验逻辑,拿到全局起止日期。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
_, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
|
||||
// 3. 把绝对日期转换为“相对周/天”。
|
||||
// 3.1 这里统一复用 conv.RealDateToRelativeDate,确保和现有排程口径一致;
|
||||
// 3.2 若日期超出学期配置范围,直接返回错误,避免错误边界进入工具层。
|
||||
startWeek, startDay, err := conv.RealDateToRelativeDate(globalStartDate.Format(conv.DateFormat))
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
endWeek, endDay, err := conv.RealDateToRelativeDate(globalEndDate.Format(conv.DateFormat))
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
if endWeek < startWeek || (endWeek == startWeek && endDay < startDay) {
|
||||
return 0, 0, 0, 0, respond.InvalidDateRange
|
||||
}
|
||||
return startWeek, startDay, endWeek, endDay, nil
|
||||
}
|
||||
|
||||
// normalizeTaskClassIDsForMultiPlanning 归一化 task_class_ids(过滤非法值、去重并保序)。
|
||||
func normalizeTaskClassIDsForMultiPlanning(ids []int) []int {
|
||||
if len(ids) == 0 {
|
||||
return []int{}
|
||||
}
|
||||
normalized := make([]int, 0, len(ids))
|
||||
seen := make(map[int]struct{}, len(ids))
|
||||
for _, id := range ids {
|
||||
if id <= 0 {
|
||||
continue
|
||||
}
|
||||
if _, exists := seen[id]; exists {
|
||||
continue
|
||||
}
|
||||
seen[id] = struct{}{}
|
||||
normalized = append(normalized, id)
|
||||
}
|
||||
return normalized
|
||||
}
|
||||
|
||||
// prepareTaskClassesForMultiPlanning 把 DAO 结果转成可直接粗排的数据集。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 校验每个任务类可参与自动排程;
|
||||
// 2. 计算全局时间窗(最早开始 ~ 最晚结束);
|
||||
// 3. 执行多任务类排序策略。
|
||||
func prepareTaskClassesForMultiPlanning(taskClasses []model.TaskClass, orderedIDs []int) ([]*model.TaskClass, time.Time, time.Time, error) {
|
||||
if len(orderedIDs) == 0 {
|
||||
return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
classByID := make(map[int]*model.TaskClass, len(taskClasses))
|
||||
for i := range taskClasses {
|
||||
tc := &taskClasses[i]
|
||||
classByID[tc.ID] = tc
|
||||
}
|
||||
|
||||
ordered := make([]*model.TaskClass, 0, len(orderedIDs))
|
||||
var globalStart time.Time
|
||||
var globalEnd time.Time
|
||||
for idx, id := range orderedIDs {
|
||||
taskClass, exists := classByID[id]
|
||||
if !exists || taskClass == nil {
|
||||
return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
|
||||
}
|
||||
if taskClass.Mode == nil || *taskClass.Mode != "auto" {
|
||||
return nil, time.Time{}, time.Time{}, respond.TaskClassModeNotAuto
|
||||
}
|
||||
if taskClass.StartDate == nil || taskClass.EndDate == nil {
|
||||
return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
|
||||
}
|
||||
start := *taskClass.StartDate
|
||||
end := *taskClass.EndDate
|
||||
if end.Before(start) {
|
||||
return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
|
||||
}
|
||||
if idx == 0 || start.Before(globalStart) {
|
||||
globalStart = start
|
||||
}
|
||||
if idx == 0 || end.After(globalEnd) {
|
||||
globalEnd = end
|
||||
}
|
||||
ordered = append(ordered, taskClass)
|
||||
}
|
||||
|
||||
sortTaskClassesForMultiPlanning(ordered, orderedIDs)
|
||||
return ordered, globalStart, globalEnd, nil
|
||||
}
|
||||
|
||||
// sortTaskClassesForMultiPlanning 执行稳定排序:
|
||||
// 1. end_date 早优先;
|
||||
// 2. rapid 优先于 steady;
|
||||
// 3. 输入顺序兜底。
|
||||
func sortTaskClassesForMultiPlanning(taskClasses []*model.TaskClass, inputOrder []int) {
|
||||
if len(taskClasses) <= 1 {
|
||||
return
|
||||
}
|
||||
orderIndex := make(map[int]int, len(inputOrder))
|
||||
for idx, id := range inputOrder {
|
||||
orderIndex[id] = idx
|
||||
}
|
||||
|
||||
sort.SliceStable(taskClasses, func(i, j int) bool {
|
||||
left := taskClasses[i]
|
||||
right := taskClasses[j]
|
||||
if left == nil || right == nil {
|
||||
return left != nil
|
||||
}
|
||||
if left.EndDate != nil && right.EndDate != nil && !left.EndDate.Equal(*right.EndDate) {
|
||||
return left.EndDate.Before(*right.EndDate)
|
||||
}
|
||||
leftRapid := left.Strategy != nil && *left.Strategy == "rapid"
|
||||
rightRapid := right.Strategy != nil && *right.Strategy == "rapid"
|
||||
if leftRapid != rightRapid {
|
||||
return leftRapid
|
||||
}
|
||||
leftOrder, leftOK := orderIndex[left.ID]
|
||||
rightOrder, rightOK := orderIndex[right.ID]
|
||||
if leftOK && rightOK && leftOrder != rightOrder {
|
||||
return leftOrder < rightOrder
|
||||
}
|
||||
return left.ID < right.ID
|
||||
})
|
||||
}
|
||||
|
||||
// HybridScheduleWithPlan 构建“单任务类”混合日程(existing + suggested)。
|
||||
func (ss *ScheduleService) HybridScheduleWithPlan(
|
||||
ctx context.Context, userID, taskClassID int,
|
||||
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||
// 1. 校验并读取任务类。
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if taskClass == nil {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
if taskClass.Mode == nil || *taskClass.Mode != "auto" {
|
||||
return nil, nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
if taskClass.StartDate == nil || taskClass.EndDate == nil {
|
||||
return nil, nil, respond.InvalidDateRange
|
||||
}
|
||||
|
||||
// 2. 拉取时间窗内既有日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx, userID,
|
||||
conv.CalculateFirstDayOfWeek(*taskClass.StartDate),
|
||||
conv.CalculateLastDayOfWeek(*taskClass.EndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 执行粗排。
|
||||
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 统一合并。
|
||||
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||
return entries, allocatedItems, nil
|
||||
}
|
||||
|
||||
// HybridScheduleWithPlanMulti 构建“多任务类”混合日程(existing + suggested)。
|
||||
func (ss *ScheduleService) HybridScheduleWithPlanMulti(
|
||||
ctx context.Context,
|
||||
userID int,
|
||||
taskClassIDs []int,
|
||||
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||
// 1. 归一化任务类 ID。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 拉取任务类并做校验/排序。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 拉取全局时间窗内既有日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx,
|
||||
userID,
|
||||
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 多任务类粗排。
|
||||
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 5. 统一合并。
|
||||
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||
return entries, allocatedItems, nil
|
||||
}
|
||||
|
||||
// buildHybridEntriesFromSchedulesAndAllocated 合并 existing/suggested 条目。
|
||||
//
|
||||
// 说明:
|
||||
// 1. existing 按“事件 + 天 + 可嵌入语义 + 阻塞语义”分组,再按连续节次切块;
|
||||
// 2. suggested 直接根据 allocatedItems 生成;
|
||||
// 3. 仅做内存组装,不做数据库操作。
|
||||
func buildHybridEntriesFromSchedulesAndAllocated(
|
||||
schedules []model.Schedule,
|
||||
allocatedItems []model.TaskClassItem,
|
||||
) []model.HybridScheduleEntry {
|
||||
entries := make([]model.HybridScheduleEntry, 0, len(schedules)/2+len(allocatedItems))
|
||||
|
||||
type eventGroupKey struct {
|
||||
EventID int
|
||||
Week int
|
||||
DayOfWeek int
|
||||
CanBeEmbedded bool
|
||||
BlockForSuggested bool
|
||||
}
|
||||
type eventGroup struct {
|
||||
Key eventGroupKey
|
||||
Name string
|
||||
Type string
|
||||
Sections []int
|
||||
}
|
||||
groupMap := make(map[eventGroupKey]*eventGroup)
|
||||
|
||||
// 1. 先处理 existing。
|
||||
for _, s := range schedules {
|
||||
name := "未知"
|
||||
typ := "course"
|
||||
canBeEmbedded := false
|
||||
if s.Event != nil {
|
||||
name = s.Event.Name
|
||||
typ = s.Event.Type
|
||||
canBeEmbedded = s.Event.CanBeEmbedded
|
||||
}
|
||||
|
||||
// 1.1 阻塞语义:
|
||||
// 1.1.1 task 默认阻塞;
|
||||
// 1.1.2 course 且不可嵌入时阻塞;
|
||||
// 1.1.3 course 且可嵌入时,若当前原子格未被 embedded_task 占用,则不阻塞。
|
||||
blockForSuggested := true
|
||||
if typ == "course" && canBeEmbedded && s.EmbeddedTaskID == nil {
|
||||
blockForSuggested = false
|
||||
}
|
||||
|
||||
key := eventGroupKey{
|
||||
EventID: s.EventID,
|
||||
Week: s.Week,
|
||||
DayOfWeek: s.DayOfWeek,
|
||||
CanBeEmbedded: canBeEmbedded,
|
||||
BlockForSuggested: blockForSuggested,
|
||||
}
|
||||
group, ok := groupMap[key]
|
||||
if !ok {
|
||||
group = &eventGroup{
|
||||
Key: key,
|
||||
Name: name,
|
||||
Type: typ,
|
||||
}
|
||||
groupMap[key] = group
|
||||
}
|
||||
group.Sections = append(group.Sections, s.Section)
|
||||
}
|
||||
|
||||
for _, group := range groupMap {
|
||||
if len(group.Sections) == 0 {
|
||||
continue
|
||||
}
|
||||
sort.Ints(group.Sections)
|
||||
|
||||
runStart := group.Sections[0]
|
||||
prev := group.Sections[0]
|
||||
flushRun := func(from, to int) {
|
||||
entries = append(entries, model.HybridScheduleEntry{
|
||||
Week: group.Key.Week,
|
||||
DayOfWeek: group.Key.DayOfWeek,
|
||||
SectionFrom: from,
|
||||
SectionTo: to,
|
||||
Name: group.Name,
|
||||
Type: group.Type,
|
||||
Status: "existing",
|
||||
EventID: group.Key.EventID,
|
||||
CanBeEmbedded: group.Key.CanBeEmbedded,
|
||||
BlockForSuggested: group.Key.BlockForSuggested,
|
||||
})
|
||||
}
|
||||
for i := 1; i < len(group.Sections); i++ {
|
||||
cur := group.Sections[i]
|
||||
if cur == prev+1 {
|
||||
prev = cur
|
||||
continue
|
||||
}
|
||||
flushRun(runStart, prev)
|
||||
runStart = cur
|
||||
prev = cur
|
||||
}
|
||||
flushRun(runStart, prev)
|
||||
}
|
||||
|
||||
// 2. 再处理 suggested。
|
||||
for _, item := range allocatedItems {
|
||||
if item.EmbeddedTime == nil {
|
||||
continue
|
||||
}
|
||||
name := "未命名任务"
|
||||
if item.Content != nil && strings.TrimSpace(*item.Content) != "" {
|
||||
name = strings.TrimSpace(*item.Content)
|
||||
}
|
||||
entries = append(entries, model.HybridScheduleEntry{
|
||||
Week: item.EmbeddedTime.Week,
|
||||
DayOfWeek: item.EmbeddedTime.DayOfWeek,
|
||||
SectionFrom: item.EmbeddedTime.SectionFrom,
|
||||
SectionTo: item.EmbeddedTime.SectionTo,
|
||||
Name: name,
|
||||
Type: "task",
|
||||
Status: "suggested",
|
||||
TaskItemID: item.ID,
|
||||
TaskClassID: derefInt(item.CategoryID),
|
||||
BlockForSuggested: true,
|
||||
})
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
|
||||
func derefInt(p *int) int {
|
||||
if p == nil {
|
||||
return 0
|
||||
}
|
||||
return *p
|
||||
}
|
||||
@@ -1,552 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
type TaskClassService struct {
|
||||
// 这里可以添加数据库连接或其他依赖
|
||||
taskClassRepo *dao.TaskClassDAO
|
||||
cacheRepo *dao.CacheDAO
|
||||
scheduleRepo *dao.ScheduleDAO
|
||||
repoManager *dao.RepoManager // 统一管理多个 DAO 的事务
|
||||
}
|
||||
|
||||
func NewTaskClassService(taskClassRepo *dao.TaskClassDAO, cacheRepo *dao.CacheDAO, scheduleRepo *dao.ScheduleDAO, manager *dao.RepoManager) *TaskClassService {
|
||||
return &TaskClassService{
|
||||
taskClassRepo: taskClassRepo,
|
||||
cacheRepo: cacheRepo,
|
||||
scheduleRepo: scheduleRepo,
|
||||
repoManager: manager,
|
||||
}
|
||||
}
|
||||
|
||||
// AddOrUpdateTaskClass 为指定用户添加任务类
|
||||
func (sv *TaskClassService) AddOrUpdateTaskClass(ctx context.Context, req *model.UserAddTaskClassRequest, userID int, method int, targetTaskClassID int) error {
|
||||
//1.先校验参数
|
||||
if req.Mode == "auto" {
|
||||
if req.StartDate == "" || req.EndDate == "" {
|
||||
return respond.MissingParamForAutoScheduling
|
||||
}
|
||||
st, err := time.Parse("2006-01-02", req.StartDate)
|
||||
if err != nil {
|
||||
return respond.WrongParamType
|
||||
}
|
||||
ed, err := time.Parse("2006-01-02", req.EndDate)
|
||||
if err != nil {
|
||||
return respond.WrongParamType
|
||||
}
|
||||
if st.After(ed) {
|
||||
return respond.InvalidDateRange
|
||||
}
|
||||
}
|
||||
if req.Mode == "" || req.Name == "" || len(req.Items) == 0 {
|
||||
return respond.MissingParam
|
||||
}
|
||||
// 1. excluded_slots 属于“半天块索引”,每个索引映射 2 节(1->1-2,...,6->11-12);
|
||||
// 2. 若允许 7~12,会在粗排网格展开时产生越界节次,触发运行时 panic;
|
||||
// 3. 这里统一在写入入口拦截,避免脏数据落库后污染后续排程链路。
|
||||
for _, slot := range req.Config.ExcludedSlots {
|
||||
if slot < 1 || slot > 6 {
|
||||
return respond.WrongParamType
|
||||
}
|
||||
}
|
||||
// 1. excluded_days_of_week 表示“整天不可排”的硬约束,粗排时会直接整天屏蔽;
|
||||
// 2. 只允许 1~7,对应周一到周日;
|
||||
// 3. 若写入非法值,会导致粗排过滤口径和前端展示口径不一致,因此入口直接拦截。
|
||||
for _, dayOfWeek := range req.Config.ExcludedDaysOfWeek {
|
||||
if dayOfWeek < 1 || dayOfWeek > 7 {
|
||||
return respond.WrongParamType
|
||||
}
|
||||
}
|
||||
//2.写数据库(事务内)
|
||||
if err := sv.taskClassRepo.Transaction(func(txDAO *dao.TaskClassDAO) error {
|
||||
taskClass, items, err := conv.ProcessUserAddTaskClassRequest(req, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if method == 1 { // 更新操作
|
||||
taskClass.ID = targetTaskClassID
|
||||
}
|
||||
|
||||
taskClassID, err := txDAO.AddOrUpdateTaskClass(userID, taskClass)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := range items {
|
||||
items[i].CategoryID = &taskClassID
|
||||
}
|
||||
if err := txDAO.AddOrUpdateTaskClassItems(userID, items); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) GetUserTaskClassInfos(ctx context.Context, userID int) (*model.UserGetTaskClassesResponse, error) {
|
||||
//1.先查询redis
|
||||
list, err := sv.cacheRepo.GetTaskClassList(ctx, userID)
|
||||
if err == nil {
|
||||
//命中缓存
|
||||
return list, nil
|
||||
} else if !errors.Is(err, redis.Nil) { //不是缓存未命中错误,说明redis可能炸了,照常放行
|
||||
log.Println("redis获取任务分类列表失败:", err)
|
||||
}
|
||||
//2.缓存未命中,查询数据库
|
||||
taskClasses, err := sv.taskClassRepo.GetUserTaskClasses(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp := conv.TaskClassModelToResponse(taskClasses)
|
||||
//3.写入缓存
|
||||
err = sv.cacheRepo.AddTaskClassList(ctx, userID, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) GetUserCompleteTaskClass(ctx context.Context, userID int, taskClassID int) (*model.UserAddTaskClassRequest, error) {
|
||||
//1.查询数据库
|
||||
taskClass, err := sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//2.转换为响应结构体
|
||||
resp, err := conv.ProcessUserGetCompleteTaskClassRequest(taskClass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) AddTaskClassItemIntoSchedule(ctx context.Context, req *model.UserInsertTaskClassItemToScheduleRequest, userID int, taskID int) error {
|
||||
//1.先验证任务块归属
|
||||
taskClassID, err := sv.taskClassRepo.GetTaskClassIDByTaskItemID(ctx, taskID) //通过任务块ID获取所属任务类ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) //通过任务类ID获取所属用户ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ownerID != userID {
|
||||
return respond.TaskClassItemNotBelongToUser
|
||||
}
|
||||
//2.再检查任务块本身是否已经被安排
|
||||
result, err := sv.taskClassRepo.IfTaskClassItemArranged(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result {
|
||||
return respond.TaskClassItemAlreadyArranged
|
||||
}
|
||||
//3.取出任务块信息
|
||||
taskItem, err := sv.taskClassRepo.GetTaskClassItemByID(ctx, taskID) //通过任务块ID获取任务块信息
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//更新TaskClassItem的embedded_time字段
|
||||
taskItem.EmbeddedTime = &model.TargetTime{
|
||||
DayOfWeek: req.DayOfWeek,
|
||||
Week: req.Week,
|
||||
SectionFrom: req.StartSection,
|
||||
SectionTo: req.EndSection,
|
||||
}
|
||||
//3.判断是否嵌入课程
|
||||
if req.EmbedCourseEventID != 0 {
|
||||
//先检查看课程是否存在、是否归属该用户以及是否已经被嵌入了其他任务块
|
||||
courseOwnerID, err := sv.scheduleRepo.GetCourseUserIDByID(ctx, req.EmbedCourseEventID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if courseOwnerID != userID {
|
||||
return respond.CourseNotBelongToUser
|
||||
}
|
||||
//再检查用户给的时间是否和课程的时间匹配(目前逻辑是给的区间必须完全匹配)
|
||||
match, err := sv.scheduleRepo.IsCourseTimeMatch(ctx, req.EmbedCourseEventID, req.Week, req.DayOfWeek, req.StartSection, req.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !match {
|
||||
return respond.CourseTimeNotMatch
|
||||
}
|
||||
//查询对应时段的课程是否已被其他任务块嵌入了(目前业务限制:一个课程只能被一个任务块嵌入,但是目前设计是支持多个任务块嵌入一节课的,只要放得下)
|
||||
isEmbedded, err := sv.scheduleRepo.IsCourseEmbeddedByOtherTaskBlock(ctx, req.EmbedCourseEventID, req.StartSection, req.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if isEmbedded {
|
||||
return respond.CourseAlreadyEmbeddedByOtherTaskBlock
|
||||
}
|
||||
//嵌入课程,直接更新日程表对应时段的 embedded_task_id 字段
|
||||
err = sv.scheduleRepo.EmbedTaskIntoSchedule(req.StartSection, req.EndSection, req.DayOfWeek, req.Week, userID, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//更新任务块的 embedded_time 字段
|
||||
err = sv.taskClassRepo.UpdateTaskClassItemEmbeddedTime(ctx, taskID, taskItem.EmbeddedTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
//4.否则构造Schedule模型
|
||||
sections := make([]int, 0, req.EndSection-req.StartSection+1)
|
||||
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(req, taskItem, nil, userID, req.StartSection, req.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//将节次区间转换为节次切片,方便后续检查冲突
|
||||
for section := req.StartSection; section <= req.EndSection; section++ {
|
||||
sections = append(sections, section)
|
||||
}
|
||||
//4.1 统一检查冲突(避免逐条查库)
|
||||
conflict, err := sv.scheduleRepo.HasUserScheduleConflict(ctx, userID, req.Week, req.DayOfWeek, sections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if conflict {
|
||||
return respond.ScheduleConflict
|
||||
}
|
||||
// 5. 写入数据库(通过 RepoManager 统一管理事务)
|
||||
// 这里的 sv.daoManager 是你在初始化 Service 时注入的全局 RepoManager 实例
|
||||
if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
// 5.1 使用事务中的 ScheduleRepo 插入 Event
|
||||
// 💡 这里的 txM.Schedule 已经注入了事务句柄
|
||||
//此处要将req中的起始section以及第几周、星期几转换成绝对时间,存入scheduleEvent的StartTime和EndTime字段中,方便后续查询和冲突检查
|
||||
st, ed, err := conv.RelativeTimeToRealTime(req.Week, req.DayOfWeek, req.StartSection, req.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
scheduleEvent.StartTime = st
|
||||
scheduleEvent.EndTime = ed
|
||||
eventID, err := txM.Schedule.AddScheduleEvent(scheduleEvent)
|
||||
if err != nil {
|
||||
return err // 触发回滚
|
||||
}
|
||||
// 5.2 关联 ID(纯内存操作,无需 tx)
|
||||
for i := range schedules {
|
||||
schedules[i].EventID = eventID
|
||||
}
|
||||
// 5.3 使用事务中的 ScheduleRepo 批量插入原子槽位
|
||||
// 💡 如果这里因为外键或唯一索引报错,5.1 的 Event 也会被撤回
|
||||
if _, err = txM.Schedule.AddSchedules(schedules); err != nil {
|
||||
return err // 触发回滚
|
||||
}
|
||||
// 5.4 使用事务中的 TaskRepo 更新任务状态
|
||||
// 💡 这里的 txM.Task 取代了你原来的 txDAO
|
||||
if err := txM.TaskClass.UpdateTaskClassItemEmbeddedTime(ctx, taskID, taskItem.EmbeddedTime); err != nil {
|
||||
return err // 触发回滚
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
// 这里处理最终的错误返回,比如 respond.Error
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) DeleteTaskClassItem(ctx context.Context, userID int, taskItemID int) error {
|
||||
//1.先验证任务块归属
|
||||
taskClassID, err := sv.taskClassRepo.GetTaskClassIDByTaskItemID(ctx, taskItemID) //通过任务块ID获取所属任务类ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) //通过任务类ID获取所属用户ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ownerID != userID {
|
||||
return respond.TaskClassItemNotBelongToUser
|
||||
}
|
||||
//2.如果该任务块已经被安排了,先解除安排,再删除任务块(事务)
|
||||
if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//2.1.先检查该任务块是否已经被安排了
|
||||
arranged, err := txM.TaskClass.IfTaskClassItemArranged(ctx, taskItemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if arranged {
|
||||
//2.2.如果已经被安排了,先解除安排
|
||||
//先扫schedules找到该task_item_id并删除
|
||||
_, txErr := txM.Schedule.FindEmbeddedTaskIDAndDeleteIt(ctx, taskItemID)
|
||||
//2.3.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskItemID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//再删除schedule_event表中对应的事件
|
||||
txErr = txM.Schedule.DeleteScheduleEventByTaskItemID(ctx, taskItemID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
}
|
||||
//2.4.最后删除任务块
|
||||
err = txM.TaskClass.DeleteTaskClassItemByID(ctx, taskItemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) DeleteTaskClass(ctx context.Context, userID int, taskClassID int) error {
|
||||
//1.先验证任务类归属
|
||||
ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) //通过任务类ID获取所属用户ID
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
return err
|
||||
}
|
||||
if ownerID != userID {
|
||||
return respond.TaskClassNotBelongToUser
|
||||
}
|
||||
//2.删除任务类(事务)
|
||||
err = sv.taskClassRepo.DeleteTaskClassByID(ctx, taskClassID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetCompleteTaskClassByID 获取任务类完整详情(含关联的 TaskClassItem 列表)。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1) 直接委托 DAO 层查询,不做额外业务逻辑;
|
||||
// 2) 主要供 Agent 排程链路使用,获取 Items 用于 materialize 节点映射。
|
||||
func (sv *TaskClassService) GetCompleteTaskClassByID(ctx context.Context, taskClassID, userID int) (*model.TaskClass, error) {
|
||||
return sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) BatchApplyPlans(ctx context.Context, taskClassID int, userID int, plans *model.UserInsertTaskClassItemToScheduleRequestBatch) error {
|
||||
//1.通过任务类id获取任务类详情
|
||||
taskClass, err := sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
return err
|
||||
}
|
||||
//2.校验任务类的参数是否合法
|
||||
if taskClass == nil {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return respond.TaskClassModeNotAuto
|
||||
}
|
||||
//3.获取任务类安排的时间范围内的全部周数信息(左右边界不足一周的情况也要算作一周),用于下方冲突检查
|
||||
startWeekTime := conv.CalculateFirstDayOfWeek(*taskClass.StartDate)
|
||||
endWeekTime := conv.CalculateLastDayOfWeek(*taskClass.EndDate)
|
||||
schedules, err := sv.scheduleRepo.GetUserSchedulesByTimeRange(ctx, userID, startWeekTime, endWeekTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
startWeek, _, err := conv.RealDateToRelativeDate(startWeekTime.Format("2006-01-02"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endWeek, _, err := conv.RealDateToRelativeDate(endWeekTime.Format("2006-01-02"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//4.统一检查冲突(避免逐条查库)
|
||||
//先将日程放入一个map中,key是"周-星期-节次",value是课程信息,方便后续检查冲突
|
||||
courseMap := make(map[string]model.Schedule)
|
||||
for _, schedule := range schedules {
|
||||
key := fmt.Sprintf("%d-%d-%d", schedule.Week, schedule.DayOfWeek, schedule.Section)
|
||||
courseMap[key] = schedule
|
||||
}
|
||||
//再遍历每个任务块的安排时间,检查是否和课程冲突(目前逻辑是只要有一个时段冲突就算冲突,后续可以优化为统计冲突的时段数量,或者提供具体的冲突时段信息)
|
||||
for _, plan := range plans.Items {
|
||||
if plan.Week < startWeek || plan.Week > endWeek {
|
||||
return respond.TaskClassItemTryingToInsertOutOfTimeRange
|
||||
}
|
||||
for section := plan.StartSection; section <= plan.EndSection; section++ {
|
||||
key := fmt.Sprintf("%d-%d-%d", plan.Week, plan.DayOfWeek, section)
|
||||
// 如果课程存在,并且满足以下任一条件则认为冲突:
|
||||
// 1. 课程时段已经被其他任务块嵌入了(不允许多个任务块嵌入同一课程)
|
||||
// 2. 当前时段的课的EventID与用户计划中指定的EmbedCourseEventID不匹配(说明用户计划要嵌入的课程和当前时段的课不是同一节)
|
||||
// 3. 用户计划中没有指定EmbedCourseEventID(即EmbedCourseEventID为0),但当前时段有课(不允许在有课的时段安排任务块)
|
||||
// 4. 当前时段的课不允许被嵌入(即使用户计划中指定了EmbedCourseEventID,但如果课程本身不允许被嵌入了,也算冲突)
|
||||
if course, exists := courseMap[key]; exists && ((plan.EmbedCourseEventID != 0 && course.EmbeddedTask != nil) ||
|
||||
(plan.EmbedCourseEventID != course.EventID) || plan.EmbedCourseEventID == 0 || !course.Event.CanBeEmbedded) {
|
||||
return respond.ScheduleConflict
|
||||
}
|
||||
}
|
||||
}
|
||||
//5.分流批量写入数据库(通过 RepoManager 统一管理事务)
|
||||
//先分流
|
||||
toEmbed := make([]model.SingleTaskClassItem, 0) //需要嵌入课程的任务块
|
||||
toNormal := make([]model.SingleTaskClassItem, 0) //需要新建日程的任务块
|
||||
for _, item := range plans.Items {
|
||||
if item.EmbedCourseEventID != 0 {
|
||||
toEmbed = append(toEmbed, item)
|
||||
} else {
|
||||
toNormal = append(toNormal, item)
|
||||
}
|
||||
}
|
||||
//再开事务批量写库
|
||||
if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//5.1 先处理需要嵌入课程的任务块
|
||||
//先提取出需要嵌入的课程ID和TaskItemID列表
|
||||
courseIDs := make([]int, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
courseIDs = append(courseIDs, item.EmbedCourseEventID)
|
||||
}
|
||||
itemIDs := make([]int, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
itemIDs = append(itemIDs, item.TaskItemID)
|
||||
}
|
||||
//检查任务块本身是否已经被安排
|
||||
result, err := sv.taskClassRepo.BatchCheckIfTaskClassItemsArranged(ctx, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result {
|
||||
return respond.TaskClassItemAlreadyArranged
|
||||
}
|
||||
//验证一下plans中的taskItemID确实都属于这个用户和这个任务类(避免用户恶意构造请求把别的用户的任务块或者不属于任何任务类的任务块也安排了)
|
||||
//同时也能检查是否重复
|
||||
result, err = sv.taskClassRepo.ValidateTaskItemIDsBelongToTaskClass(ctx, taskClassID, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !result {
|
||||
return respond.TaskClassItemNotBelongToTaskClass
|
||||
}
|
||||
//批量更新日程表中对应课程的embedded_task_id字段(目前业务限制:一个课程只能被一个任务块嵌入了,所以直接批量更新,不用担心覆盖问题)
|
||||
err = txM.Schedule.BatchEmbedTaskIntoSchedule(ctx, courseIDs, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//批量更新任务块的embedded_time字段
|
||||
targetTimes := make([]*model.TargetTime, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
targetTimes = append(targetTimes, &model.TargetTime{
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
Week: item.Week,
|
||||
SectionFrom: item.StartSection,
|
||||
SectionTo: item.EndSection,
|
||||
})
|
||||
}
|
||||
err = txM.TaskClass.BatchUpdateTaskClassItemEmbeddedTime(ctx, itemIDs, targetTimes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//5.2 再处理需要新建日程的任务块
|
||||
//先提取出需要新建日程的任务块ID列表
|
||||
normalItemIDs := make([]int, 0, len(toNormal))
|
||||
for _, item := range toNormal {
|
||||
normalItemIDs = append(normalItemIDs, item.TaskItemID)
|
||||
}
|
||||
//验证一下plans中的taskItemID确实都属于这个任务类(避免用户恶意构造请求把别的用户的任务块或者不属于任何任务类的任务块也安排了)
|
||||
result, err = sv.taskClassRepo.ValidateTaskItemIDsBelongToTaskClass(ctx, taskClassID, normalItemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !result {
|
||||
return respond.TaskClassItemNotBelongToTaskClass
|
||||
}
|
||||
//批量提取TaskItems
|
||||
taskItems, err := txM.TaskClass.GetTaskClassItemsByIDs(ctx, normalItemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(taskItems) != len(normalItemIDs) {
|
||||
log.Printf("警告:批量提取任务块时,返回的任务块数量与请求中的任务块ID数量不匹配,可能存在数据问题。请求ID数量:%d,返回任务块数量:%d", len(normalItemIDs), len(taskItems))
|
||||
return respond.InternalError(errors.New("返回的任务块数量与请求中的任务块ID数量不匹配,可能存在数据问题"))
|
||||
}
|
||||
//将toNormal按照TaskItemID升序排序,将taskItems也按照ID升序排序,保证一一对应关系(上面已经检查过重复)
|
||||
//如果请求中的任务块ID有重复,这里就无法保证一一对应关系了,后续可以考虑在请求层面加一个校验,拒绝包含重复任务块ID的请求
|
||||
sort.SliceStable(toNormal, func(i, j int) bool {
|
||||
return toNormal[i].TaskItemID < toNormal[j].TaskItemID
|
||||
})
|
||||
sort.SliceStable(taskItems, func(i, j int) bool {
|
||||
return taskItems[i].ID < taskItems[j].ID
|
||||
})
|
||||
//开始构建event和schedules
|
||||
finalSchedules := make([]model.Schedule, 0) //最终要插入数据库的Schedule切片
|
||||
finalScheduleEvents := make([]model.ScheduleEvent, 0) //最终要插入数据库的ScheduleEvent切片
|
||||
pos := make([]int, 0) //记录每个任务块对应的Schedule在finalSchedules中的位置,方便后续批量插入数据库后回填EventID
|
||||
for i := 0; i < len(toNormal); i++ {
|
||||
item := toNormal[i]
|
||||
taskItem := taskItems[i]
|
||||
if item.StartSection < 1 || item.EndSection > 12 || item.StartSection > item.EndSection {
|
||||
return respond.InvalidSectionRange
|
||||
}
|
||||
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(&model.UserInsertTaskClassItemToScheduleRequest{
|
||||
Week: item.Week,
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
StartSection: item.StartSection,
|
||||
EndSection: item.EndSection,
|
||||
EmbedCourseEventID: 0, //不嵌入课程
|
||||
}, &taskItem, nil, userID, item.StartSection, item.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalScheduleEvents = append(finalScheduleEvents, *scheduleEvent)
|
||||
for range schedules {
|
||||
pos = append(pos, len(finalScheduleEvents)-1)
|
||||
}
|
||||
finalSchedules = append(finalSchedules, schedules...)
|
||||
}
|
||||
//最后批量插入数据库
|
||||
//先插入ScheduleEvent表,获取生成的EventID,再批量插入Schedule表,最后批量更新TaskClassItem的embedded_time字段
|
||||
ids, err := txM.Schedule.InsertScheduleEvents(ctx, finalScheduleEvents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// 将生成的 ScheduleEvent ID 赋值给对应的 Schedule 的 EventID 字段
|
||||
for i := range finalSchedules {
|
||||
finalSchedules[i].EventID = ids[pos[i]]
|
||||
}
|
||||
if _, err = txM.Schedule.AddSchedules(finalSchedules); err != nil {
|
||||
return err
|
||||
}
|
||||
//批量更新任务块的embedded_time字段
|
||||
targetTimes = make([]*model.TargetTime, 0, len(toEmbed))
|
||||
for _, item := range toNormal {
|
||||
targetTimes = append(targetTimes, &model.TargetTime{
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
Week: item.Week,
|
||||
SectionFrom: item.StartSection,
|
||||
SectionTo: item.EndSection,
|
||||
})
|
||||
}
|
||||
//提取出所有需要更新的任务块ID
|
||||
itemIDs = make([]int, 0, len(toNormal))
|
||||
for _, item := range toNormal {
|
||||
itemIDs = append(itemIDs, item.TaskItemID)
|
||||
}
|
||||
err = txM.TaskClass.BatchUpdateTaskClassItemEmbeddedTime(ctx, itemIDs, targetTimes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,537 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const (
|
||||
// taskBatchStatusMaxIDs 限制批量状态查询的单次任务 ID 数量,避免大请求放大缓存/内存扫描成本。
|
||||
taskBatchStatusMaxIDs = 100
|
||||
// taskUrgencyPromoteDedupeTTL 是"同一任务平移请求"的去重锁有效期。
|
||||
//
|
||||
// 设计考虑:
|
||||
// 1. 太短会导致消费稍慢时被重复投递;
|
||||
// 2. 太长会导致首次投递失败后恢复变慢;
|
||||
// 3. 这里先取 120 秒作为折中值,后续可按线上观测再调优。
|
||||
taskUrgencyPromoteDedupeTTL = 120 * time.Second
|
||||
// taskUrgencyPromoteDedupeKeyFmt 是任务平移去重键模板。
|
||||
taskUrgencyPromoteDedupeKeyFmt = "smartflow:task:promote:pending:%d:%d"
|
||||
)
|
||||
|
||||
type TaskService struct {
|
||||
// dao 负责任务表读写。
|
||||
dao *dao.TaskDAO
|
||||
// cache 负责任务列表缓存与 Redis 去重锁能力。
|
||||
cache *dao.CacheDAO
|
||||
// eventPublisher 负责发布 outbox 事件(可能为空:例如未启用 Kafka/总线时)。
|
||||
eventPublisher outboxinfra.EventPublisher
|
||||
// activeScheduleDAO 负责维护主动调度 due job;为空时保持旧任务链路兼容。
|
||||
activeScheduleDAO *dao.ActiveScheduleDAO
|
||||
}
|
||||
|
||||
// NewTaskService 创建 TaskService 实例。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做依赖注入,不做连接可用性探测;
|
||||
// 2. 允许 eventPublisher 为空(用于本地降级场景)。
|
||||
func NewTaskService(taskDAO *dao.TaskDAO, cacheDAO *dao.CacheDAO, eventPublisher outboxinfra.EventPublisher) *TaskService {
|
||||
return &TaskService{
|
||||
dao: taskDAO,
|
||||
cache: cacheDAO,
|
||||
eventPublisher: eventPublisher,
|
||||
}
|
||||
}
|
||||
|
||||
// SetActiveScheduleDAO 注入主动调度自有表仓储。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责迁移期依赖接线,避免扩大 TaskService 构造函数调用面;
|
||||
// 2. 不改变任务主流程语义,未注入时主动调度 job 同步自动降级为 no-op。
|
||||
func (ts *TaskService) SetActiveScheduleDAO(activeScheduleDAO *dao.ActiveScheduleDAO) {
|
||||
if ts != nil {
|
||||
ts.activeScheduleDAO = activeScheduleDAO
|
||||
}
|
||||
}
|
||||
|
||||
// AddTask 新增任务。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责参数转换、优先级合法性校验与写库;
|
||||
// 2. 不负责"紧急性自动平移"逻辑(该逻辑发生在任务读取时的懒触发链路)。
|
||||
func (ts *TaskService) AddTask(ctx context.Context, req *model.UserAddTaskRequest, userID int) (*model.UserAddTaskResponse, error) {
|
||||
// 1. 把用户请求转换为内部模型,避免 API 层结构直接泄漏到 DAO。
|
||||
taskModel := conv.UserAddTaskRequestToModel(req, userID)
|
||||
// 2. 优先级范围校验:当前任务体系只允许 1~4。
|
||||
if taskModel.Priority < 1 || taskModel.Priority >= 5 {
|
||||
return nil, respond.InvalidPriority
|
||||
}
|
||||
// 3. 写库。
|
||||
createdTask, err := ts.dao.AddTask(taskModel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ts.syncActiveScheduleJobBestEffort(ctx, createdTask)
|
||||
// 4. 返回对外响应 DTO。
|
||||
response := conv.ModelToUserAddTaskResponse(createdTask)
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// CompleteTask 将用户指定任务标记为"已完成"。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责入参校验与业务错误映射;
|
||||
// 2. 负责调用 DAO 执行状态更新;
|
||||
// 3. 不负责幂等键校验(幂等由中间件处理);
|
||||
// 4. 不负责缓存删除细节(缓存删除由 GORM cache_deleter 回调触发)。
|
||||
func (ts *TaskService) CompleteTask(ctx context.Context, req *model.UserCompleteTaskRequest, userID int) (*model.UserCompleteTaskResponse, error) {
|
||||
// 1. 参数兜底:请求体为空、非法 user 或非法 task_id 直接返回业务错误。
|
||||
if req == nil || userID <= 0 || req.TaskID <= 0 {
|
||||
return nil, respond.WrongTaskID
|
||||
}
|
||||
|
||||
// 2. 调用 DAO 执行"查询 + 必要时更新"。
|
||||
updatedTask, alreadyCompleted, err := ts.dao.CompleteTaskByID(ctx, userID, req.TaskID)
|
||||
if err != nil {
|
||||
// 2.1 任务不存在或不属于当前用户时,统一映射为 WrongTaskID。
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, respond.WrongTaskID
|
||||
}
|
||||
// 2.2 其余数据库异常向上透传,交由统一错误处理器返回 500。
|
||||
return nil, err
|
||||
}
|
||||
if updatedTask == nil {
|
||||
// 3. 极端防御:DAO 不应返回 nil,若发生则视为内部异常。
|
||||
return nil, errors.New("complete task succeeded but task is nil")
|
||||
}
|
||||
|
||||
// 4. 构造响应:
|
||||
// 4.1 already_completed=true 表示本次命中幂等,不影响最终成功状态;
|
||||
// 4.2 is_completed 始终为 true,便于前端直接刷新状态。
|
||||
resp := &model.UserCompleteTaskResponse{
|
||||
TaskID: updatedTask.ID,
|
||||
IsCompleted: true,
|
||||
AlreadyCompleted: alreadyCompleted,
|
||||
Status: "completed",
|
||||
}
|
||||
ts.cancelActiveScheduleJobBestEffort(ctx, updatedTask.UserID, updatedTask.ID, "task_completed")
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// UndoCompleteTask 取消用户任务的"已完成勾选"。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责入参校验与业务错误映射;
|
||||
// 2. 负责调用 DAO 执行状态恢复;
|
||||
// 3. 不负责幂等缓存(本接口按需求要求:任务未完成时必须报错);
|
||||
// 4. 不负责缓存删除细节(由 GORM cache_deleter 回调自动处理)。
|
||||
func (ts *TaskService) UndoCompleteTask(ctx context.Context, req *model.UserUndoCompleteTaskRequest, userID int) (*model.UserUndoCompleteTaskResponse, error) {
|
||||
// 1. 参数兜底:请求体为空、非法 user 或非法 task_id 直接返回业务错误。
|
||||
if req == nil || userID <= 0 || req.TaskID <= 0 {
|
||||
return nil, respond.WrongTaskID
|
||||
}
|
||||
|
||||
// 2. 调用 DAO 执行"恢复未完成"逻辑。
|
||||
updatedTask, err := ts.dao.UndoCompleteTaskByID(ctx, userID, req.TaskID)
|
||||
if err != nil {
|
||||
// 2.1 任务不存在或不属于当前用户,统一映射为 WrongTaskID。
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return nil, respond.WrongTaskID
|
||||
}
|
||||
// 2.2 任务本来就未完成:按需求返回明确业务错误。
|
||||
if errors.Is(err, respond.TaskNotCompleted) {
|
||||
return nil, respond.TaskNotCompleted
|
||||
}
|
||||
// 2.3 其余数据库异常继续向上透传。
|
||||
return nil, err
|
||||
}
|
||||
if updatedTask == nil {
|
||||
// 3. 极端防御:DAO 成功但返回 nil,视为内部异常。
|
||||
return nil, errors.New("undo complete task succeeded but task is nil")
|
||||
}
|
||||
|
||||
// 4. 组装响应:恢复成功后 is_completed 恒为 false。
|
||||
resp := &model.UserUndoCompleteTaskResponse{
|
||||
TaskID: updatedTask.ID,
|
||||
IsCompleted: false,
|
||||
Status: "uncompleted",
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetUserTasks 获取用户任务列表(含"读时紧急性派生"与"异步平移触发")。
|
||||
//
|
||||
// 核心流程(步骤化):
|
||||
// 1. 先读缓存,未命中再回源 DB,并把"原始模型"回填缓存;
|
||||
// 2. 在内存里做"读时派生":仅用于本次返回给前端,不直接改库;
|
||||
// 3. 收集"已到紧急分界线且仍处于非紧急象限"的任务 ID;
|
||||
// 4. 通过 Redis SETNX 去重后,发布 outbox 事件异步落库;
|
||||
// 5. 无论发布成功与否,都优先返回本次派生结果,保证用户读体验。
|
||||
//
|
||||
// 一致性策略:
|
||||
// 1. 缓存里存的是原始任务,不是派生后的优先级;
|
||||
// 2. 真实平移由异步消费者条件更新 DB;
|
||||
// 3. DB 更新后由 cache_deleter 自动删缓存,下一次读取自然拿到新状态。
|
||||
func (ts *TaskService) GetUserTasks(ctx context.Context, userID int) ([]model.GetUserTaskResp, error) {
|
||||
derivedTasks, err := ts.GetTasksWithUrgencyPromotion(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conv.ModelToGetUserTasksResp(derivedTasks), nil
|
||||
}
|
||||
|
||||
// BatchTaskStatus 批量查询当前登录用户任务的完成状态。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责请求 ID 的过滤、去重和数量限制;
|
||||
// 2. 只返回当前用户有权访问且仍存在的任务,避免泄露其他用户任务状态;
|
||||
// 3. 复用 getRawUserTasks 的 Redis 任务列表缓存链路,不新增绕过缓存的 DAO 查询;
|
||||
// 4. 该接口只读,不触发 GORM cache_deleter,也不反向修改 NewAgent timeline 历史快照。
|
||||
func (ts *TaskService) BatchTaskStatus(ctx context.Context, req *model.BatchTaskStatusRequest, userID int) (*model.BatchTaskStatusResponse, error) {
|
||||
resp := &model.BatchTaskStatusResponse{
|
||||
Items: []model.BatchTaskStatusItem{},
|
||||
}
|
||||
if userID <= 0 {
|
||||
return nil, respond.WrongUserID
|
||||
}
|
||||
if req == nil {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// 1. 先把前端传入的历史卡片 task id 做归一化。
|
||||
// 1.1 非法 ID 直接过滤,避免无意义匹配;
|
||||
// 1.2 保留首次出现顺序,方便前端按请求顺序回填;
|
||||
// 1.3 超过上限时截断,避免单次 hydration 请求放大服务端成本。
|
||||
validIDs := compactPositiveUniqueTaskIDsWithLimit(req.IDs, taskBatchStatusMaxIDs)
|
||||
if len(validIDs) == 0 {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// 2. 复用原始任务读取链路。
|
||||
// 2.1 命中 Redis 时直接读取 smartflow:tasks:{userID};
|
||||
// 2.2 未命中时由 getRawUserTasks 回源 DB 并回填缓存;
|
||||
// 2.3 用户没有任何任务时映射为空 items,符合 hydration 的“无匹配不报错”语义。
|
||||
tasks, err := ts.getRawUserTasks(ctx, userID)
|
||||
if err != nil {
|
||||
if errors.Is(err, respond.UserTasksEmpty) {
|
||||
return resp, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 3. 在当前用户任务集合内做内存匹配。
|
||||
// 3.1 不命中的 ID 可能是已删除、属于其他用户、或历史快照里的旧任务,统一静默过滤;
|
||||
// 3.2 返回字段只包含当前模型可用的完成状态,避免伪造不存在的 updated_at。
|
||||
taskByID := make(map[int]model.Task, len(tasks))
|
||||
for _, task := range tasks {
|
||||
taskByID[task.ID] = task
|
||||
}
|
||||
for _, id := range validIDs {
|
||||
task, exists := taskByID[id]
|
||||
if !exists {
|
||||
continue
|
||||
}
|
||||
resp.Items = append(resp.Items, model.BatchTaskStatusItem{
|
||||
ID: task.ID,
|
||||
IsCompleted: task.IsCompleted,
|
||||
})
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetTasksWithUrgencyPromotion 读取用户任务并应用读时紧急性提升 + 异步落库触发。
|
||||
//
|
||||
// 统一入口,供前端查询(GetUserTasks)和 LLM 工具查询(QueryTasksForTool)复用。
|
||||
// 调用方不应假设 DB 已更新——持久化是异步的。
|
||||
func (ts *TaskService) GetTasksWithUrgencyPromotion(ctx context.Context, userID int) ([]model.Task, error) {
|
||||
rawTasks, err := ts.getRawUserTasks(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
derivedTasks, duePromoteTaskIDs := deriveTaskUrgencyForRead(rawTasks, time.Now())
|
||||
ts.tryEnqueueTaskUrgencyPromote(ctx, userID, duePromoteTaskIDs)
|
||||
return derivedTasks, nil
|
||||
}
|
||||
|
||||
// getRawUserTasks 读取"原始任务模型"。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责缓存命中/回源 DB/回填缓存;
|
||||
// 2. 不做优先级派生,不做异步事件投递;
|
||||
// 3. 缓存写失败只记日志,不阻断主流程。
|
||||
func (ts *TaskService) getRawUserTasks(ctx context.Context, userID int) ([]model.Task, error) {
|
||||
// 1. 先查缓存:命中则直接返回。
|
||||
cachedTasks, err := ts.cache.GetUserTasksFromCache(ctx, userID)
|
||||
if err == nil {
|
||||
return cachedTasks, nil
|
||||
}
|
||||
|
||||
// 2. 非 redis.Nil 错误直接返回,避免掩盖真实故障。
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 3. 缓存未命中回源 DB。
|
||||
dbTasks, err := ts.dao.GetTasksByUserID(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 4. 回填缓存(失败不阻断主链路)。
|
||||
if setErr := ts.cache.SetUserTasksToCache(ctx, userID, dbTasks); setErr != nil {
|
||||
log.Printf("写入用户任务缓存失败: user_id=%d err=%v", userID, setErr)
|
||||
}
|
||||
return dbTasks, nil
|
||||
}
|
||||
|
||||
// deriveTaskUrgencyForRead 对任务做"读时紧急性派生",并收集需要异步落库的任务 ID。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只在内存里改本次返回值,不写 DB;
|
||||
// 2. 只做"到线且未完成任务"的优先级映射;
|
||||
// 3. 不处理去重锁和事件发布。
|
||||
//
|
||||
// 返回语义:
|
||||
// 1. 第一个返回值:可直接用于响应前端的派生任务切片;
|
||||
// 2. 第二个返回值:需要发"异步平移事件"的任务 ID 列表(可能为空)。
|
||||
func deriveTaskUrgencyForRead(tasks []model.Task, now time.Time) ([]model.Task, []int) {
|
||||
// 1. 拷贝切片,避免修改调用方持有的原始数据。
|
||||
derived := make([]model.Task, len(tasks))
|
||||
copy(derived, tasks)
|
||||
|
||||
pendingPromoteTaskIDs := make([]int, 0, len(derived))
|
||||
|
||||
// 2. 逐条判断是否满足"自动平移"条件。
|
||||
for idx := range derived {
|
||||
current := &derived[idx]
|
||||
|
||||
// 2.1 已完成任务不参与平移。
|
||||
if current.IsCompleted {
|
||||
continue
|
||||
}
|
||||
// 2.2 没有分界线的任务不参与平移。
|
||||
if current.UrgencyThresholdAt == nil {
|
||||
continue
|
||||
}
|
||||
// 2.3 尚未到分界线,不平移。
|
||||
if current.UrgencyThresholdAt.After(now) {
|
||||
continue
|
||||
}
|
||||
|
||||
// 2.4 到线后,仅把"不紧急象限"平移到对应"紧急象限"。
|
||||
// 2.4.1 重要不紧急(2) -> 重要且紧急(1)
|
||||
// 2.4.2 不简单不重要(4) -> 简单不重要(3)
|
||||
switch current.Priority {
|
||||
case 2:
|
||||
current.Priority = 1
|
||||
pendingPromoteTaskIDs = append(pendingPromoteTaskIDs, current.ID)
|
||||
case 4:
|
||||
current.Priority = 3
|
||||
pendingPromoteTaskIDs = append(pendingPromoteTaskIDs, current.ID)
|
||||
default:
|
||||
// 2.4.3 其他优先级不处理(包含已经是 1/3 的情况)。
|
||||
}
|
||||
}
|
||||
return derived, pendingPromoteTaskIDs
|
||||
}
|
||||
|
||||
// tryEnqueueTaskUrgencyPromote attempts to publish a "task urgency promote
// requested" event for the given tasks.
//
// Responsibilities:
//  1. Redis dedupe locks (SETNX) + outbox publishing;
//  2. The actual DB update is NOT done here (a consumer handles it);
//  3. On publish failure the locks acquired in this call are released, so the
//     tasks are not stuck looking "already enqueued" for the lock TTL.
func (ts *TaskService) tryEnqueueTaskUrgencyPromote(ctx context.Context, userID int, taskIDs []int) {
	// 1. Fast exits: no publisher, invalid user, or nothing to promote.
	if ts.eventPublisher == nil || userID <= 0 || len(taskIDs) == 0 {
		return
	}

	// 2. Sanitize IDs first so invalid ones never reach dedupe or publish.
	validTaskIDs := compactPositiveUniqueTaskIDs(taskIDs)
	if len(validTaskIDs) == 0 {
		return
	}

	// 3. Acquire one SETNX dedupe lock per task:
	// 3.1 only tasks whose lock we win join this publish;
	// 3.2 losing the lock means a request is already in flight — skip it;
	// 3.3 lock errors are logged only and never abort the main flow.
	lockedTaskIDs := make([]int, 0, len(validTaskIDs))
	lockedKeys := make([]string, 0, len(validTaskIDs))
	for _, taskID := range validTaskIDs {
		lockKey := fmt.Sprintf(taskUrgencyPromoteDedupeKeyFmt, userID, taskID)
		locked, lockErr := ts.cache.AcquireLock(ctx, lockKey, taskUrgencyPromoteDedupeTTL)
		if lockErr != nil {
			log.Printf("任务平移去重锁获取失败: user_id=%d task_id=%d err=%v", userID, taskID, lockErr)
			continue
		}
		if !locked {
			continue
		}
		lockedTaskIDs = append(lockedTaskIDs, taskID)
		lockedKeys = append(lockedKeys, lockKey)
	}
	if len(lockedTaskIDs) == 0 {
		return
	}

	// 4. Publish the outbox event: this only guarantees "enqueued into the
	//    outbox or an error" — it does not wait for the consumer to finish.
	publishErr := eventsvc.PublishTaskUrgencyPromoteRequested(ctx, ts.eventPublisher, model.TaskUrgencyPromoteRequestedPayload{
		UserID:      userID,
		TaskIDs:     lockedTaskIDs,
		TriggeredAt: time.Now(),
	})
	if publishErr != nil {
		// 4.1 Rollback: release this call's locks so later requests are not
		//     blocked by a lock for an event that never made it out.
		ts.releaseTaskPromoteLocks(lockedKeys)
		log.Printf("任务平移事件发布失败: user_id=%d task_ids=%v err=%v", userID, lockedTaskIDs, publishErr)
		return
	}

	log.Printf("任务平移事件已发布: user_id=%d task_ids=%v", userID, lockedTaskIDs)
}
|
||||
|
||||
// releaseTaskPromoteLocks 释放任务平移去重锁。
|
||||
//
|
||||
// 说明:
|
||||
// 1. 仅用于"发布失败回滚"场景;
|
||||
// 2. 使用 Background 避免请求上下文已取消时导致锁释放失败。
|
||||
func (ts *TaskService) releaseTaskPromoteLocks(lockKeys []string) {
|
||||
if len(lockKeys) == 0 {
|
||||
return
|
||||
}
|
||||
releaseCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, key := range lockKeys {
|
||||
if err := ts.cache.ReleaseLock(releaseCtx, key); err != nil {
|
||||
log.Printf("任务平移去重锁释放失败: key=%s err=%v", key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// compactPositiveUniqueTaskIDs filters out non-positive task IDs and removes
// duplicates, preserving first-seen order.
//
// Responsibilities:
//  1. Pure argument sanitation only;
//  2. No business-rule decisions.
func compactPositiveUniqueTaskIDs(taskIDs []int) []int {
	// limit = 0 means "no cap" — keeps existing callers' behavior.
	return compactPositiveUniqueTaskIDsWithLimit(taskIDs, 0)
}
|
||||
|
||||
// compactPositiveUniqueTaskIDsWithLimit filters out non-positive task IDs,
// removes duplicates, and optionally caps the result length.
//
// Responsibilities:
//  1. Pure argument normalization: no task lookup, no permission checks;
//  2. limit <= 0 means unlimited, preserving existing callers' behavior;
//  3. Scanning stops as soon as the cap is hit, so oversized requests do not
//     keep burning CPU.
func compactPositiveUniqueTaskIDsWithLimit(taskIDs []int, limit int) []int {
	unlimited := limit <= 0
	seen := make(map[int]struct{}, len(taskIDs))
	result := make([]int, 0, len(taskIDs))
	for i := 0; i < len(taskIDs); i++ {
		id := taskIDs[i]
		if id <= 0 {
			continue
		}
		if _, already := seen[id]; already {
			continue
		}
		seen[id] = struct{}{}
		result = append(result, id)
		// Early return once the cap is reached.
		if !unlimited && len(result) >= limit {
			return result
		}
	}
	return result
}
|
||||
|
||||
// UpdateTask applies a partial update to one of the user's tasks.
//
// Responsibilities:
//  1. Parameter validation: task_id sanity and priority_group range;
//  2. Converting the request DTO into the DAO-level updates map;
//  3. Returning an explicit business error for an empty request body
//     (no updatable fields);
//  4. Cache invalidation is NOT done here (the GORM cache_deleter callback
//     handles it automatically).
func (ts *TaskService) UpdateTask(ctx context.Context, req *model.UserUpdateTaskRequest, userID int) (model.GetUserTaskResp, error) {
	// 1. Guard against nil/invalid input.
	if req == nil || userID <= 0 || req.TaskID <= 0 {
		return model.GetUserTaskResp{}, respond.WrongTaskID
	}

	// 2. Build the updates map: only non-nil fields are written.
	updates := make(map[string]interface{})
	if req.Title != nil {
		updates["title"] = *req.Title
	}
	if req.PriorityGroup != nil {
		// 2.1 Priority range check: the current task system only allows 1~4.
		if *req.PriorityGroup < 1 || *req.PriorityGroup > 4 {
			return model.GetUserTaskResp{}, respond.InvalidPriority
		}
		// 2.2 JSON field name is priority_group; the DB column is priority.
		updates["priority"] = *req.PriorityGroup
	}
	if req.DeadlineAt != nil {
		updates["deadline_at"] = *req.DeadlineAt
	}
	if req.UrgencyThresholdAt != nil {
		updates["urgency_threshold_at"] = *req.UrgencyThresholdAt
	}

	// 3. Empty-update detection: at least one updatable field is required.
	if len(updates) == 0 {
		return model.GetUserTaskResp{}, respond.TaskUpdateNoFields
	}

	// 4. Run the update through the DAO.
	updatedTask, err := ts.dao.UpdateTaskByID(ctx, userID, req.TaskID, updates)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// Missing row maps to the same business error as a bad task ID.
			return model.GetUserTaskResp{}, respond.WrongTaskID
		}
		return model.GetUserTaskResp{}, err
	}
	// Best-effort sync of the active-schedule due job; failures are logged inside.
	ts.syncActiveScheduleJobBestEffort(ctx, updatedTask)

	// 5. Convert to the response DTO.
	return conv.ModelToGetUserTaskResp(updatedTask), nil
}
|
||||
|
||||
// DeleteTask permanently deletes one of the user's tasks.
//
// Responsibilities:
//  1. Input validation and business-error mapping;
//  2. Delegating the hard delete to the DAO;
//  3. Returning the idempotent info code (TaskAlreadyDeleted) when the task
//     does not exist;
//  4. Cache invalidation is NOT done here (the GORM cache_deleter callback
//     handles it automatically).
func (ts *TaskService) DeleteTask(ctx context.Context, req *model.UserCompleteTaskRequest, userID int) (int, error) {
	// 1. Guard against nil/invalid input.
	if req == nil || userID <= 0 || req.TaskID <= 0 {
		return 0, respond.WrongTaskID
	}

	// 2. Delete through the DAO.
	deletedTask, err := ts.dao.DeleteTaskByID(ctx, userID, req.TaskID)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// 2.1 Task missing or owned by another user: idempotent info code.
			return 0, respond.TaskAlreadyDeleted
		}
		return 0, err
	}
	// Best-effort cancellation of the task's pending active-schedule job.
	ts.cancelActiveScheduleJobBestEffort(ctx, deletedTask.UserID, deletedTask.ID, "task_deleted")

	return deletedTask.ID, nil
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// syncActiveScheduleJobBestEffort syncs the active-schedule due job after a
// task change.
//
// Responsibilities:
//  1. Only maintains the important_urgent_task job; it never triggers the
//     active-scheduling main chain directly;
//  2. Upserts a pending job when the task is incomplete and has an
//     urgency_threshold_at;
//  3. Cancels the current pending job when the task is completed or the
//     threshold is nil;
//  4. Task APIs are not yet fully transactional, so job-sync failures are only
//     logged — avoiding the worse "row written but API reported failure" UX.
func (ts *TaskService) syncActiveScheduleJobBestEffort(ctx context.Context, task *model.Task) {
	if ts == nil || ts.activeScheduleDAO == nil || task == nil {
		return
	}
	// Not schedulable: drop any pending job instead of upserting.
	if task.IsCompleted || task.UrgencyThresholdAt == nil {
		ts.cancelActiveScheduleJobBestEffort(ctx, task.UserID, task.ID, "task_not_schedulable")
		return
	}

	// Build the pending job; ID/DedupeKey/TraceID are deterministic per
	// (user, task[, threshold window]) so repeated syncs upsert the same row.
	job := &model.ActiveScheduleJob{
		ID:          activeScheduleJobID(task.UserID, task.ID),
		UserID:      task.UserID,
		TaskID:      task.ID,
		TriggerType: model.ActiveScheduleTriggerTypeImportantUrgentTask,
		Status:      model.ActiveScheduleJobStatusPending,
		TriggerAt:   *task.UrgencyThresholdAt,
		DedupeKey:   activeScheduleTriggerDedupeKey(task.UserID, task.ID, *task.UrgencyThresholdAt),
		TraceID:     activeScheduleTraceID(task.UserID, task.ID),
	}
	// Upsert failures are best-effort: log only, never fail the task write.
	if err := ts.activeScheduleDAO.CreateOrUpdateJob(ctx, job); err != nil {
		log.Printf("主动调度 job upsert 失败: user_id=%d task_id=%d err=%v", task.UserID, task.ID, err)
	}
}
|
||||
|
||||
// cancelActiveScheduleJobBestEffort cancels the task's current pending job.
//
// Responsibilities:
//  1. Only pending jobs are canceled; historical triggered/skipped/failed
//     records are kept for auditing;
//  2. "No pending job found" is a normal, idempotent outcome;
//  3. reason goes into last_error_code only, so later debugging can tell
//     where a cancellation came from.
func (ts *TaskService) cancelActiveScheduleJobBestEffort(ctx context.Context, userID int, taskID int, reason string) {
	if ts == nil || ts.activeScheduleDAO == nil || userID <= 0 || taskID <= 0 {
		return
	}
	job, err := ts.activeScheduleDAO.FindPendingJobByTask(ctx, userID, taskID)
	if err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			// Nothing pending: idempotent no-op.
			return
		}
		// Lookup failure is best-effort: log and give up.
		log.Printf("主动调度 pending job 查询失败: user_id=%d task_id=%d err=%v", userID, taskID, err)
		return
	}
	now := time.Now()
	updates := map[string]any{
		"status":          model.ActiveScheduleJobStatusCanceled,
		"last_error_code": reason,
		"last_scanned_at": &now,
	}
	// Update failures are logged only; the caller's main flow is not affected.
	if err = ts.activeScheduleDAO.UpdateJobFields(ctx, job.ID, updates); err != nil {
		log.Printf("主动调度 pending job 取消失败: user_id=%d task_id=%d job_id=%s err=%v", userID, taskID, job.ID, err)
	}
}
|
||||
|
||||
// activeScheduleJobID builds the job row ID for a task's active-schedule job.
// Deterministic per (user, task) pair so repeated syncs hit the same row.
func activeScheduleJobID(userID int, taskID int) string {
	const jobIDFormat = "asj_task_%d_%d"
	return fmt.Sprintf(jobIDFormat, userID, taskID)
}
|
||||
|
||||
// activeScheduleTraceID builds the trace ID attached to a task's
// active-schedule job. Deterministic per (user, task) pair.
func activeScheduleTraceID(userID int, taskID int) string {
	const traceIDFormat = "trace_active_task_%d_%d"
	return fmt.Sprintf(traceIDFormat, userID, taskID)
}
|
||||
|
||||
func activeScheduleTriggerDedupeKey(userID int, taskID int, triggerAt time.Time) string {
|
||||
windowStart := triggerAt.Truncate(30 * time.Minute)
|
||||
return fmt.Sprintf("%d:%s:%s:%d:%s",
|
||||
userID,
|
||||
model.ActiveScheduleTriggerTypeImportantUrgentTask,
|
||||
model.ActiveScheduleTargetTypeTaskPool,
|
||||
taskID,
|
||||
windowStart.Format(time.RFC3339),
|
||||
)
|
||||
}
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// IsPreviewExpired 判断 preview 是否已经超过确认有效期。
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
@@ -8,11 +8,11 @@ import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -9,11 +9,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
func candidateDTO(item candidate.Candidate) CandidateDTO {
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/candidate"
|
||||
schedulercontext "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/context"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/selection"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -8,11 +8,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
)
|
||||
|
||||
// EnqueueActiveScheduleTriggeredInTx 在事务内写入 active_schedule.triggered outbox 消息。
|
||||
|
||||
@@ -7,14 +7,14 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
activepreview "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/preview"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/google/uuid"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
|
||||
@@ -3,10 +3,9 @@ package dao
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
coremodel "github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/spf13/viper"
|
||||
"gorm.io/driver/mysql"
|
||||
coremodel "github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
mysqlinfra "github.com/LoveLosita/smartflow/backend/shared/infra/mysql"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -17,18 +16,7 @@ import (
|
||||
// 2. 不迁移 task、schedule、agent、notification 或 user/auth 表,避免独立进程越权管理其它服务模型;
|
||||
// 3. 返回的 *gorm.DB 供服务内主链路、due job scanner 和 outbox consumer 复用。
|
||||
func OpenDBFromConfig() (*gorm.DB, error) {
|
||||
host := viper.GetString("database.host")
|
||||
port := viper.GetString("database.port")
|
||||
user := viper.GetString("database.user")
|
||||
password := viper.GetString("database.password")
|
||||
dbname := viper.GetString("database.dbname")
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
user, password, host, port, dbname,
|
||||
)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
|
||||
db, err := mysqlinfra.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -5,9 +5,9 @@ import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc/pb"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
|
||||
@@ -8,10 +8,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeapply "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/apply"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
@@ -22,8 +18,12 @@ import (
|
||||
activesvc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/service"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// ScheduleStateToPreview 将 agent 的 ScheduleState 转换为前端预览缓存格式。
|
||||
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
baseconv "github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
baseconv "github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// ScheduleProvider 实现 model.ScheduleStateProvider 接口。
|
||||
|
||||
@@ -3,8 +3,8 @@ package agentconv
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// WindowDay 表示排课窗口中的一天(相对周 + 周几)。
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
package agentconv
|
||||
|
||||
import (
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
schedule "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
// ApplyPlacedItems 将前端提交的绝对时间放置项应用到 ScheduleState。
|
||||
|
||||
@@ -8,13 +8,13 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
taskmodel "github.com/LoveLosita/smartflow/backend/model"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||||
agentrouter "github.com/LoveLosita/smartflow/backend/services/agent/router"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
taskmodel "github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
)
|
||||
|
||||
|
||||
@@ -6,9 +6,9 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
@@ -78,7 +78,7 @@ func compactUnifiedMessagesIfNeeded(
|
||||
msg3 := messages[3].Content
|
||||
|
||||
// 3. Token 预算检查。
|
||||
breakdown, overBudget, needCompactMsg1, needCompactMsg2 := pkg.CheckStageTokenBudget(msg0, msg1, msg2, msg3)
|
||||
breakdown, overBudget, needCompactMsg1, needCompactMsg2 := agentshared.CheckStageTokenBudget(msg0, msg1, msg2, msg3)
|
||||
|
||||
log.Printf(
|
||||
"[COMPACT:%s] token budget check: total=%d budget=%d over=%v compactMsg1=%v compactMsg2=%v (msg0=%d msg1=%d msg2=%d msg3=%d)",
|
||||
@@ -97,14 +97,14 @@ func compactUnifiedMessagesIfNeeded(
|
||||
msg1 = compactUnifiedMsg1(ctx, input, msg1)
|
||||
messages[1].Content = msg1
|
||||
// 压缩 msg1 后重算预算。
|
||||
breakdown = pkg.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
breakdown = agentshared.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
}
|
||||
|
||||
// 6. msg2 压缩(阶段工作区 → LLM 摘要)。
|
||||
if needCompactMsg2 || breakdown.Total > pkg.StageTokenBudget {
|
||||
if needCompactMsg2 || breakdown.Total > agentshared.StageTokenBudget {
|
||||
msg2 = compactUnifiedMsg2(ctx, input, msg2)
|
||||
messages[2].Content = msg2
|
||||
breakdown = pkg.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
breakdown = agentshared.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
}
|
||||
|
||||
// 7. 记录最终 token 分布。
|
||||
@@ -124,8 +124,8 @@ func compactUnifiedMessagesIfNeeded(
|
||||
// 1. 先按消息类型汇总 token,保证总量准确;
|
||||
// 2. 再把最后一个 user 消息尽量视作 msg3,保留阶段指令语义;
|
||||
// 3. 其他历史内容归入 msg1 / msg2,确保上下文统计不会因为结构不标准而断更。
|
||||
func estimateFallbackStageTokenBreakdown(messages []*schema.Message) pkg.StageTokenBreakdown {
|
||||
breakdown := pkg.StageTokenBreakdown{Budget: pkg.StageTokenBudget}
|
||||
func estimateFallbackStageTokenBreakdown(messages []*schema.Message) agentshared.StageTokenBreakdown {
|
||||
breakdown := agentshared.StageTokenBreakdown{Budget: agentshared.StageTokenBudget}
|
||||
if len(messages) == 0 {
|
||||
return breakdown
|
||||
}
|
||||
@@ -146,7 +146,7 @@ func estimateFallbackStageTokenBreakdown(messages []*schema.Message) pkg.StageTo
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
tokens := pkg.EstimateMessageTokens(msg)
|
||||
tokens := agentshared.EstimateMessageTokens(msg)
|
||||
breakdown.Total += tokens
|
||||
|
||||
switch msg.Role {
|
||||
@@ -199,7 +199,7 @@ func compactUnifiedMsg1(
|
||||
}
|
||||
|
||||
// 3. SSE: 压缩开始。
|
||||
tokenBefore := pkg.EstimateTextTokens(msg1)
|
||||
tokenBefore := agentshared.EstimateTextTokens(msg1)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_start",
|
||||
fmt.Sprintf("正在压缩对话历史(%d tokens)...", tokenBefore),
|
||||
@@ -219,7 +219,7 @@ func compactUnifiedMsg1(
|
||||
}
|
||||
|
||||
// 5. SSE: 压缩完成。
|
||||
tokenAfter := pkg.EstimateTextTokens(newSummary)
|
||||
tokenAfter := agentshared.EstimateTextTokens(newSummary)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_done",
|
||||
fmt.Sprintf("对话历史已压缩:%d → %d tokens", tokenBefore, tokenAfter),
|
||||
@@ -246,7 +246,7 @@ func compactUnifiedMsg2(
|
||||
msg2 string,
|
||||
) string {
|
||||
// 1. SSE: 压缩开始。
|
||||
tokenBefore := pkg.EstimateTextTokens(msg2)
|
||||
tokenBefore := agentshared.EstimateTextTokens(msg2)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_start",
|
||||
fmt.Sprintf("正在压缩执行记录(%d tokens)...", tokenBefore),
|
||||
@@ -266,7 +266,7 @@ func compactUnifiedMsg2(
|
||||
}
|
||||
|
||||
// 3. SSE: 压缩完成。
|
||||
tokenAfter := pkg.EstimateTextTokens(compressed)
|
||||
tokenAfter := agentshared.EstimateTextTokens(compressed)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_done",
|
||||
fmt.Sprintf("执行记录已压缩:%d → %d tokens", tokenBefore, tokenAfter),
|
||||
@@ -285,7 +285,7 @@ func compactUnifiedMsg2(
|
||||
func saveUnifiedTokenStats(
|
||||
ctx context.Context,
|
||||
input UnifiedCompactInput,
|
||||
breakdown pkg.StageTokenBreakdown,
|
||||
breakdown agentshared.StageTokenBreakdown,
|
||||
) {
|
||||
if input.CompactionStore == nil || input.FlowState == nil {
|
||||
return
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
@@ -6,11 +6,11 @@ import (
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/services/agent/rpc/pb"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
type Handler struct {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
package agentshared
|
||||
|
||||
import "github.com/LoveLosita/smartflow/backend/model"
|
||||
import "github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
|
||||
func CloneWeekSchedules(src []model.UserWeekSchedule) []model.UserWeekSchedule {
|
||||
if len(src) == 0 {
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
@@ -77,7 +76,7 @@ func CompactUnifiedMessagesIfNeeded(
|
||||
msg3 := messages[3].Content
|
||||
|
||||
// 3. 执行 token 预算检查,判断是否需要压缩历史对话或阶段工作区。
|
||||
breakdown, overBudget, needCompactMsg1, needCompactMsg2 := pkg.CheckStageTokenBudget(msg0, msg1, msg2, msg3)
|
||||
breakdown, overBudget, needCompactMsg1, needCompactMsg2 := CheckStageTokenBudget(msg0, msg1, msg2, msg3)
|
||||
|
||||
log.Printf(
|
||||
"[COMPACT:%s] token budget check: total=%d budget=%d over=%v compactMsg1=%v compactMsg2=%v (msg0=%d msg1=%d msg2=%d msg3=%d)",
|
||||
@@ -95,14 +94,14 @@ func CompactUnifiedMessagesIfNeeded(
|
||||
if needCompactMsg1 {
|
||||
msg1 = compactUnifiedMsg1(ctx, input, msg1)
|
||||
messages[1].Content = msg1
|
||||
breakdown = pkg.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
breakdown = EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
}
|
||||
|
||||
// 6. 若 msg1 压缩后仍超限,再压缩 msg2(阶段工作区 / ReAct 记录)。
|
||||
if needCompactMsg2 || breakdown.Total > pkg.StageTokenBudget {
|
||||
if needCompactMsg2 || breakdown.Total > StageTokenBudget {
|
||||
msg2 = compactUnifiedMsg2(ctx, input, msg2)
|
||||
messages[2].Content = msg2
|
||||
breakdown = pkg.EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
breakdown = EstimateStageMessagesTokens(msg0, msg1, msg2, msg3)
|
||||
}
|
||||
|
||||
// 7. 记录最终 token 分布,供后续调试与监控使用。
|
||||
@@ -122,8 +121,8 @@ func CompactUnifiedMessagesIfNeeded(
|
||||
// 1. 先按消息类型汇总 token,保证总量准确;
|
||||
// 2. 再把最后一个 user 消息尽量视作 msg3,保留阶段指令语义;
|
||||
// 3. 其他历史内容归入 msg1 / msg2,确保上下文统计不会因为结构不标准而断更。
|
||||
func estimateFallbackStageTokenBreakdown(messages []*schema.Message) pkg.StageTokenBreakdown {
|
||||
breakdown := pkg.StageTokenBreakdown{Budget: pkg.StageTokenBudget}
|
||||
func estimateFallbackStageTokenBreakdown(messages []*schema.Message) StageTokenBreakdown {
|
||||
breakdown := StageTokenBreakdown{Budget: StageTokenBudget}
|
||||
if len(messages) == 0 {
|
||||
return breakdown
|
||||
}
|
||||
@@ -144,7 +143,7 @@ func estimateFallbackStageTokenBreakdown(messages []*schema.Message) pkg.StageTo
|
||||
if msg == nil {
|
||||
continue
|
||||
}
|
||||
tokens := pkg.EstimateMessageTokens(msg)
|
||||
tokens := EstimateMessageTokens(msg)
|
||||
breakdown.Total += tokens
|
||||
|
||||
switch msg.Role {
|
||||
@@ -194,7 +193,7 @@ func compactUnifiedMsg1(
|
||||
log.Printf("[COMPACT:%s] load existing compaction failed: %v, proceed without cache", input.StageName, err)
|
||||
}
|
||||
|
||||
tokenBefore := pkg.EstimateTextTokens(msg1)
|
||||
tokenBefore := EstimateTextTokens(msg1)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_start",
|
||||
fmt.Sprintf("正在压缩对话历史(%d tokens)...", tokenBefore),
|
||||
@@ -212,7 +211,7 @@ func compactUnifiedMsg1(
|
||||
return msg1
|
||||
}
|
||||
|
||||
tokenAfter := pkg.EstimateTextTokens(newSummary)
|
||||
tokenAfter := EstimateTextTokens(newSummary)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_done",
|
||||
fmt.Sprintf("对话历史已压缩:%d → %d tokens", tokenBefore, tokenAfter),
|
||||
@@ -237,7 +236,7 @@ func compactUnifiedMsg2(
|
||||
input UnifiedCompactInput,
|
||||
msg2 string,
|
||||
) string {
|
||||
tokenBefore := pkg.EstimateTextTokens(msg2)
|
||||
tokenBefore := EstimateTextTokens(msg2)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_start",
|
||||
fmt.Sprintf("正在压缩执行记录(%d tokens)...", tokenBefore),
|
||||
@@ -255,7 +254,7 @@ func compactUnifiedMsg2(
|
||||
return msg2
|
||||
}
|
||||
|
||||
tokenAfter := pkg.EstimateTextTokens(compressed)
|
||||
tokenAfter := EstimateTextTokens(compressed)
|
||||
_ = input.Emitter.EmitStatus(
|
||||
input.StatusBlockID, input.StageName, "context_compact_done",
|
||||
fmt.Sprintf("执行记录已压缩:%d → %d tokens", tokenBefore, tokenAfter),
|
||||
@@ -274,7 +273,7 @@ func compactUnifiedMsg2(
|
||||
func saveUnifiedTokenStats(
|
||||
ctx context.Context,
|
||||
input UnifiedCompactInput,
|
||||
breakdown pkg.StageTokenBreakdown,
|
||||
breakdown StageTokenBreakdown,
|
||||
) {
|
||||
if input.CompactionStore == nil || input.FlowState == nil {
|
||||
return
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
package pkg
|
||||
package agentshared
|
||||
|
||||
import (
|
||||
"math"
|
||||
@@ -9,18 +9,18 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
@@ -333,7 +333,7 @@ func (s *AgentService) runNormalChatFlow(
|
||||
if chatHistory == nil {
|
||||
// 2. 缓存未命中时回源 DB,并转换为 Eino message 格式。
|
||||
cacheMiss = true
|
||||
histories, hisErr := s.repo.GetUserChatHistories(ctx, userID, pkg.HistoryFetchLimitByModel(resolvedModelName), chatID)
|
||||
histories, hisErr := s.repo.GetUserChatHistories(ctx, userID, agentshared.HistoryFetchLimitByModel(resolvedModelName), chatID)
|
||||
if hisErr != nil {
|
||||
pushErrNonBlocking(errChan, hisErr)
|
||||
return
|
||||
@@ -343,12 +343,12 @@ func (s *AgentService) runNormalChatFlow(
|
||||
|
||||
// 3. 计算本次请求可用的历史 token 预算,并执行历史裁剪。
|
||||
// 这样可以在上下文增长时稳定控制模型窗口,避免超长上下文引发报错或高延迟。
|
||||
historyBudget := pkg.HistoryTokenBudgetByModel(resolvedModelName, agentprompt.SystemPrompt, userMessage)
|
||||
trimmedHistory, totalHistoryTokens, keptHistoryTokens, droppedCount := pkg.TrimHistoryByTokenBudget(chatHistory, historyBudget)
|
||||
historyBudget := agentshared.HistoryTokenBudgetByModel(resolvedModelName, agentprompt.SystemPrompt, userMessage)
|
||||
trimmedHistory, totalHistoryTokens, keptHistoryTokens, droppedCount := agentshared.TrimHistoryByTokenBudget(chatHistory, historyBudget)
|
||||
chatHistory = trimmedHistory
|
||||
|
||||
// 4. 根据裁剪后历史长度更新 Redis 会话窗口配置,并主动执行窗口收敛。
|
||||
targetWindow := pkg.CalcSessionWindowSize(len(chatHistory))
|
||||
targetWindow := agentshared.CalcSessionWindowSize(len(chatHistory))
|
||||
if err = s.agentCache.SetSessionWindowSize(ctx, chatID, targetWindow); err != nil {
|
||||
log.Printf("设置历史窗口失败 chat=%s: %v", chatID, err)
|
||||
}
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
)
|
||||
|
||||
|
||||
@@ -17,12 +17,12 @@ import (
|
||||
"github.com/cloudwego/eino/schema"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
agentprompt "github.com/LoveLosita/smartflow/backend/services/agent/prompt"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -410,7 +410,7 @@ func (s *AgentService) loadConversationContext(ctx context.Context, chatID, user
|
||||
|
||||
// 缓存未命中时回源 DB。
|
||||
if history == nil {
|
||||
histories, hisErr := s.repo.GetUserChatHistories(ctx, 0, pkg.HistoryFetchLimitByModel("worker"), chatID)
|
||||
histories, hisErr := s.repo.GetUserChatHistories(ctx, 0, agentshared.HistoryFetchLimitByModel("worker"), chatID)
|
||||
if hisErr != nil {
|
||||
log.Printf("从 DB 加载历史失败 chat=%s: %v", chatID, hisErr)
|
||||
} else {
|
||||
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
// GetSchedulePlanPreview 按 conversation_id 读取结构化排程预览。
|
||||
|
||||
@@ -7,11 +7,11 @@ import (
|
||||
"log"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
agentshared "github.com/LoveLosita/smartflow/backend/services/agent/shared"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
// SaveScheduleState 处理前端拖拽后的“暂存排程状态”请求。
|
||||
|
||||
@@ -7,9 +7,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
func (s *AgentService) QueryTasksForTool(ctx context.Context, req agentmodel.TaskQueryRequest) ([]agentmodel.TaskQueryTaskRecord, error) {
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只读取候选记忆,不暴露管理写接口;
|
||||
// 2. 不要求调用方知道 gateway/client/memory 的具体实现;
|
||||
// 2. 不要求调用方知道 backend/client/memory 的具体实现;
|
||||
// 3. 错误原样返回给预取链路,由 agent 侧负责软降级和观测记录。
|
||||
type MemoryRPCReaderClient interface {
|
||||
Retrieve(ctx context.Context, req memorycontracts.RetrieveRequest) ([]memorycontracts.ItemDTO, error)
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
agentconv "github.com/LoveLosita/smartflow/backend/services/agent/conv"
|
||||
scheduletool "github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
|
||||
)
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,10 +7,10 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentmodel "github.com/LoveLosita/smartflow/backend/services/agent/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
taskcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/task"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
)
|
||||
|
||||
const quickTaskCreateRPCTimeout = 3 * time.Second
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// TaskClassUpsertInput 描述任务类写库工具的标准化入参。
|
||||
|
||||
@@ -3,9 +3,9 @@ package agenttools
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/agent/tools/schedule"
|
||||
taskclassresult "github.com/LoveLosita/smartflow/backend/services/agent/tools/taskclass_result"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
type taskClassUpsertExecutionInput struct {
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user