Version: 0.9.71.dev.260504

后端:
1.阶段 5 task 服务边界落地
- 新增 cmd/task 与 services/task/{dao,rpc,sv},承载 task zrpc、tasks 表迁移和 task outbox 消费边界
- 新增 gateway/client/task、shared/contracts/task 和 task port,gateway /api/v1/task/* 切到 task zrpc client
- 将 task.urgency.promote.requested handler / relay / retry loop 迁入 cmd/task,单体 worker 不再消费 task outbox
- 保留单体 Agent 残留 task 查询的 publish-only 写入能力,避免迁移期 task 事件丢失
- active-scheduler task facts / due job scanner 切到 task RPC,并移除启动期 tasks 表依赖检查
- 更新阶段 5 文档,记录 task 切流点、旧实现保留、跨域 DB 依赖缩减和下一轮建议
- 补充 task rpc 示例配置
This commit is contained in:
Losita
2026-05-05 00:00:09 +08:00
parent 29b8cf0ada
commit 6843c7efac
27 changed files with 2552 additions and 146 deletions

View File

@@ -0,0 +1,152 @@
package adapters
import (
"context"
"encoding/json"
"errors"
"fmt"
"strings"
"time"
activeports "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
taskpb "github.com/LoveLosita/smartflow/backend/services/task/rpc/pb"
taskcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/task"
"github.com/zeromicro/go-zero/zrpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// Fallback dial settings applied when TaskRPCConfig leaves the endpoint
// list/target or the timeout unset (see NewTaskRPCAdapter).
const (
	defaultTaskRPCEndpoint = "127.0.0.1:9085"
	defaultTaskRPCTimeout  = 6 * time.Second
)
// TaskRPCConfig carries the dial settings for the task zrpc client.
type TaskRPCConfig struct {
	// Endpoints lists task service addresses; entries are trimmed and
	// blank ones dropped before dialing.
	Endpoints []string
	// Target is passed through to zrpc.RpcClientConf.Target —
	// presumably a resolver target (etcd/dns scheme); verify against
	// the go-zero docs.
	Target string
	// Timeout bounds both the client call timeout and the startup ping;
	// non-positive values fall back to defaultTaskRPCTimeout.
	Timeout time.Duration
}
// TaskRPCAdapter is the RPC adapter the active-scheduler uses to reach
// the task service.
//
// Responsibility boundaries:
//  1. Only reads task_pool facts and converts them into the
//     active-scheduler's internal DTOs;
//  2. Never writes the tasks table, never maintains the task outbox,
//     and never touches due-job state;
//  3. Lets the active-scheduler dry-run / due scanner stop accessing
//     the tasks table directly.
type TaskRPCAdapter struct {
	rpc taskpb.TaskClient
}
// NewTaskRPCAdapter builds a TaskRPCAdapter from cfg, substituting the
// default endpoint and timeout when the config leaves them unset, and
// verifies connectivity with a synchronous ping before returning.
//
// Returns an error when the zrpc client cannot be constructed or the
// initial ping fails.
func NewTaskRPCAdapter(cfg TaskRPCConfig) (*TaskRPCAdapter, error) {
	dialTimeout := cfg.Timeout
	if dialTimeout <= 0 {
		dialTimeout = defaultTaskRPCTimeout
	}
	target := strings.TrimSpace(cfg.Target)
	endpoints := normalizeTaskRPCEndpoints(cfg.Endpoints)
	// With neither static endpoints nor a resolver target configured,
	// fall back to the default local address.
	if target == "" && len(endpoints) == 0 {
		endpoints = []string{defaultTaskRPCEndpoint}
	}
	clientConf := zrpc.RpcClientConf{
		Endpoints: endpoints,
		Target:    target,
		// Connect lazily; readiness is checked by the explicit ping below.
		NonBlock: true,
		Timeout:  int64(dialTimeout / time.Millisecond),
	}
	zclient, err := zrpc.NewClient(clientConf)
	if err != nil {
		return nil, err
	}
	adapter := &TaskRPCAdapter{rpc: taskpb.NewTaskClient(zclient.Conn())}
	// Fail fast at construction time when the task service is unreachable.
	if pingErr := adapter.ping(dialTimeout); pingErr != nil {
		return nil, pingErr
	}
	return adapter, nil
}
// GetTaskForActiveSchedule fetches a single task fact over the task
// zrpc service and converts it into the active-scheduler DTO.
//
// The boolean result mirrors the contract response's Found flag. A gRPC
// NotFound from the call is translated by taskRPCError into a nil
// error, so the caller observes (zero fact, false, nil) in that case.
func (a *TaskRPCAdapter) GetTaskForActiveSchedule(ctx context.Context, req activeports.TaskRequest) (activeports.TaskFact, bool, error) {
	var empty activeports.TaskFact
	if err := a.ensureReady(); err != nil {
		return empty, false, err
	}
	contractReq := taskcontracts.TaskFactRequest{
		UserID: req.UserID,
		TaskID: req.TaskID,
		Now:    req.Now,
	}
	payload, marshalErr := json.Marshal(contractReq)
	if marshalErr != nil {
		return empty, false, marshalErr
	}
	resp, callErr := a.rpc.GetTaskForActiveSchedule(ctx, &taskpb.JSONRequest{PayloadJson: payload})
	if callErr != nil {
		return empty, false, taskRPCError(callErr)
	}
	var decoded taskcontracts.TaskFactResponse
	if err := json.Unmarshal(taskJSONBytes(resp), &decoded); err != nil {
		return empty, false, err
	}
	return taskFactToActive(decoded.Task), decoded.Found, nil
}
// ensureReady reports an error when the adapter or its underlying RPC
// client has not been initialized via NewTaskRPCAdapter.
func (a *TaskRPCAdapter) ensureReady() error {
	if a != nil && a.rpc != nil {
		return nil
	}
	return errors.New("task rpc adapter 未初始化")
}
// ping performs a one-shot health probe against the task RPC service so
// that misconfigured endpoints surface at construction time instead of
// on the first real request.
func (a *TaskRPCAdapter) ping(timeout time.Duration) error {
	if err := a.ensureReady(); err != nil {
		return err
	}
	// The construction path has no caller context, so bound the probe
	// with its own deadline.
	probeCtx, cancelProbe := context.WithTimeout(context.Background(), timeout)
	defer cancelProbe()
	// NOTE(review): the generated Ping takes *StatusResponse as its
	// request message — looks odd; confirm against the proto definition.
	_, probeErr := a.rpc.Ping(probeCtx, &taskpb.StatusResponse{})
	return taskRPCError(probeErr)
}
// taskRPCError normalizes gRPC errors from the task service: NotFound
// is swallowed (it is a domain outcome, not a transport failure),
// infrastructure-level codes are wrapped with context, and everything
// else is passed through unchanged.
func taskRPCError(err error) error {
	if err == nil {
		return nil
	}
	st, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status error; leave it as-is.
		return err
	}
	switch st.Code() {
	case codes.NotFound:
		// "No such task" — callers signal absence via their own flags.
		return nil
	case codes.Internal, codes.Unavailable, codes.DeadlineExceeded:
		return fmt.Errorf("调用 task zrpc 服务失败: %w", err)
	default:
		return err
	}
}
// taskFactToActive maps the shared task contract DTO onto the
// active-scheduler's internal TaskFact representation, field by field.
func taskFactToActive(task taskcontracts.TaskFact) activeports.TaskFact {
	var fact activeports.TaskFact
	fact.ID = task.ID
	fact.UserID = task.UserID
	fact.Title = task.Title
	fact.Priority = task.Priority
	fact.IsCompleted = task.IsCompleted
	fact.DeadlineAt = task.DeadlineAt
	fact.UrgencyThresholdAt = task.UrgencyThresholdAt
	fact.EstimatedSections = task.EstimatedSections
	return fact
}
// taskJSONBytes extracts the raw JSON payload from a task RPC response,
// substituting the literal "null" when the response or its payload is
// absent so that json.Unmarshal decodes a zero value instead of failing.
func taskJSONBytes(resp *taskpb.JSONResponse) []byte {
	if resp != nil && len(resp.DataJson) > 0 {
		return resp.DataJson
	}
	return []byte("null")
}
// normalizeTaskRPCEndpoints trims surrounding whitespace from every
// configured endpoint and drops entries that end up empty, preserving
// the original order of the remainder.
func normalizeTaskRPCEndpoints(values []string) []string {
	cleaned := make([]string, 0, len(values))
	for _, raw := range values {
		if ep := strings.TrimSpace(raw); ep != "" {
			cleaned = append(cleaned, ep)
		}
	}
	return cleaned
}

View File

@@ -75,7 +75,7 @@ type runtimeDependencyTable struct {
//
// 职责边界:
// 1. 只检查表是否存在,不 AutoMigrate、不补列、不修改任何跨域表
// 2. 把 active-scheduler 运行时仍然需要的 task / agent / notification outbox 边界显式化;
// 2. 把 active-scheduler 运行时仍然需要的 agent / notification outbox 边界显式化;
// 3. 若部署顺序、库权限或表结构归属不满足,启动阶段直接 fail fast避免第一次 trigger 才反复重试。
func ensureRuntimeDependencyTables(db *gorm.DB) error {
if db == nil {
@@ -110,7 +110,7 @@ func ensureTableExists(db *gorm.DB, table runtimeDependencyTable) error {
// 说明:
// 1. active-scheduler 自有表在 OpenDBFromConfig 内迁移,这里只放跨域依赖;
// 2. notification outbox 表名来自 service catalog避免和 outbox 多表路由配置漂移;
// 3. schedule 读写已切到 schedule RPC后续切到 task/agent/notification RPC 或 read model 后,应继续移除对应表依赖。
// 3. schedule 与 task 事实读取已切到 RPC后续切到 agent/notification RPC 或 read model 后,应继续移除对应表依赖。
func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
notificationOutboxTable := "notification_outbox_messages"
if cfg, ok := outboxinfra.ResolveServiceConfig(outboxinfra.ServiceNotification); ok && cfg.TableName != "" {
@@ -118,7 +118,6 @@ func activeSchedulerRuntimeDependencyTables() []runtimeDependencyTable {
}
return []runtimeDependencyTable{
{Name: "tasks", Reason: "迁移期 dry-run / due job scanner 仍读取 task_pool 事实,下一轮切 task RPC 后移除"},
{Name: "agent_chats", Reason: "trigger 生成 preview 后预建主动调度会话"},
{Name: "chat_histories", Reason: "trigger 生成 preview 后写入会话首屏消息"},
{Name: "agent_timeline_events", Reason: "trigger 生成 preview 后写入主动调度时间线卡片"},

View File

@@ -39,6 +39,7 @@ type Options struct {
JobScanEvery time.Duration
JobScanLimit int
KafkaConfig kafkabus.Config
TaskRPC activeadapters.TaskRPCConfig
ScheduleRPC activeadapters.ScheduleRPCConfig
}
@@ -69,12 +70,15 @@ func New(db *gorm.DB, llmService *llmservice.Service, opts Options) (*Service, e
}
activeDAO := rootdao.NewActiveScheduleDAO(db)
activeReaders := activeadapters.NewGormReaders(db)
taskRPCAdapter, err := activeadapters.NewTaskRPCAdapter(opts.TaskRPC)
if err != nil {
return nil, fmt.Errorf("initialize task rpc adapter failed: %w", err)
}
scheduleRPCAdapter, err := activeadapters.NewScheduleRPCAdapter(opts.ScheduleRPC)
if err != nil {
return nil, fmt.Errorf("initialize schedule rpc adapter failed: %w", err)
}
readers := activeadapters.ReadersWithScheduleRPC(activeReaders, scheduleRPCAdapter)
readers := activeadapters.ReadersWithScheduleRPC(taskRPCAdapter, scheduleRPCAdapter)
dryRun, err := activesvc.NewDryRunService(readers)
if err != nil {
return nil, err