Version: 0.9.68.dev.260504

后端:
1. 阶段 3 notification 服务边界落地,新增 `cmd/notification`、`services/notification`、`gateway/notification`、`shared/contracts/notification` 和 notification port,按 userauth 同款最小手搓 zrpc 样板收口
2. notification outbox consumer、relay 和 retry loop 迁入独立服务入口,处理 `notification.feishu.requested`,gateway 改为通过 zrpc client 调用 notification
3. 清退旧单体 notification DAO/model/service/provider/runner 和 `service/events/notification_feishu.go`,旧实现不再作为活跃编译路径
4. 修复 outbox 路由归属、dispatch 启动扫描、Kafka topic 探测/投递超时、sending 租约恢复、毒消息 MarkDead 错误回传和 RPC timeout 边界
5. 同步调整 active-scheduler 触发通知事件、核心 outbox handler、MySQL 迁移边界和 notification 配置

文档:
1. 更新微服务迁移计划,将阶段 3 notification 标记为已完成,并明确下一阶段从 active-scheduler 开始
This commit is contained in:
Losita
2026-05-04 18:40:39 +08:00
parent 9742dc8b1c
commit abe3b4960e
41 changed files with 2178 additions and 889 deletions

View File

@@ -0,0 +1,44 @@
package sv
import (
"context"
"log"
"time"
)
// StartRetryLoop launches the background scanner that retries rows in
// notification_records.
//
// Notes:
//  1. Only the worker/all mode or the standalone notification process starts
//     this loop; API / RPC entry points do not actively scan for retries.
//  2. Retries after a provider failure are owned by this loop, so the generic
//     outbox is not held up by slow failures of an external service.
//  3. A failed round only logs the error; the next round keeps scanning.
//
// every defaults to one minute when non-positive; limit defaults to 50 rows
// per round when non-positive. The loop exits when ctx is cancelled. Safe to
// call on a nil receiver (no-op).
func (s *Service) StartRetryLoop(ctx context.Context, every time.Duration, limit int) {
	if s == nil {
		return
	}
	// Normalize inputs before handing them to the goroutine.
	interval := every
	if interval <= 0 {
		interval = time.Minute
	}
	batch := limit
	if batch <= 0 {
		batch = 50
	}
	go func() {
		tick := time.NewTicker(interval)
		defer tick.Stop()
		for {
			// Block until either cancellation or the next tick.
			select {
			case <-ctx.Done():
				return
			case <-tick.C:
			}
			res, err := s.RetryFeishuNotifications(ctx, time.Now(), batch)
			if err != nil {
				// Best-effort by design: log and try again next round.
				log.Printf("飞书通知重试扫描失败: err=%v", err)
				continue
			}
			if res.Scanned > 0 {
				log.Printf("飞书通知重试扫描完成: scanned=%d sent=%d failed=%d dead=%d skipped=%d", res.Scanned, res.Sent, res.Failed, res.Dead, res.Skipped)
			}
		}
	}()
}