Compare commits
21 Commits
2a96f4c6f9
...
dev
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
1fa7d85c46 | ||
|
|
e79cfcc4b6 | ||
|
|
689ce60ab4 | ||
|
|
a0f8d6c8cc | ||
|
|
06d7eaeda0 | ||
|
|
6ff1b3a2f2 | ||
|
|
9025096bc6 | ||
|
|
cc98b62ad8 | ||
|
|
25a608eaeb | ||
|
|
7b04b073ce | ||
|
|
61db646805 | ||
|
|
7d324b77aa | ||
|
|
d4afc6ef74 | ||
|
|
33227e48a7 | ||
|
|
816a29c062 | ||
|
|
3b6fca44a6 | ||
|
|
2204fac84e | ||
|
|
c42f0c5b8c | ||
|
|
4fc6c0cac3 | ||
|
|
46874f0806 | ||
|
|
786c8925a0 |
41
.env.full.example
Normal file
41
.env.full.example
Normal file
@@ -0,0 +1,41 @@
|
||||
# SmartFlow 全栈容器化示例环境变量。
|
||||
#
|
||||
# 说明:
|
||||
# 1. 若国内服务器无法直接拉官方镜像,可把下列镜像名改成您已缓存或私有仓库中的地址。
|
||||
# 2. Compose 默认读取根目录 .env;请按需复制为 .env 后再启动。
|
||||
|
||||
SMARTFLOW_IMAGE_USERAUTH=smartflow/userauth:latest
|
||||
SMARTFLOW_IMAGE_NOTIFICATION=smartflow/notification:latest
|
||||
SMARTFLOW_IMAGE_ACTIVE_SCHEDULER=smartflow/active-scheduler:latest
|
||||
SMARTFLOW_IMAGE_SCHEDULE=smartflow/schedule:latest
|
||||
SMARTFLOW_IMAGE_TASK=smartflow/task:latest
|
||||
SMARTFLOW_IMAGE_TASK_CLASS=smartflow/task-class:latest
|
||||
SMARTFLOW_IMAGE_COURSE=smartflow/course:latest
|
||||
SMARTFLOW_IMAGE_MEMORY=smartflow/memory:latest
|
||||
SMARTFLOW_IMAGE_AGENT=smartflow/agent:latest
|
||||
SMARTFLOW_IMAGE_TASKCLASSFORUM=smartflow/taskclassforum:latest
|
||||
SMARTFLOW_IMAGE_TOKENSTORE=smartflow/tokenstore:latest
|
||||
SMARTFLOW_IMAGE_LLM=smartflow/llm:latest
|
||||
SMARTFLOW_IMAGE_API=smartflow/api:latest
|
||||
SMARTFLOW_IMAGE_FRONTEND=smartflow/frontend:latest
|
||||
ARK_API_KEY=
|
||||
SMARTFLOW_USERAUTH_ALLOWREGISTER=false
|
||||
SMARTFLOW_NOTIFICATION_FRONTENDBASEURL=https://smartflow.example.com
|
||||
SMARTFLOW_CORS_ALLOWEDORIGINS=http://localhost:5173,https://smartflow.example.com
|
||||
|
||||
SMARTFLOW_MYSQL_IMAGE=mysql:8.0
|
||||
SMARTFLOW_REDIS_IMAGE=redis:7
|
||||
SMARTFLOW_KAFKA_IMAGE=apache/kafka:3.7.2
|
||||
SMARTFLOW_ETCD_IMAGE=quay.io/coreos/etcd:v3.5.5
|
||||
SMARTFLOW_MINIO_IMAGE=minio/minio:RELEASE.2023-03-20T20-16-18Z
|
||||
SMARTFLOW_MILVUS_IMAGE=milvusdb/milvus:v2.4.4
|
||||
SMARTFLOW_ATTU_IMAGE=zilliz/attu:v2.4.3
|
||||
|
||||
SMARTFLOW_API_PORT=8080
|
||||
SMARTFLOW_FRONTEND_PORT=80
|
||||
SMARTFLOW_FRONTEND_HTTPS_PORT=443
|
||||
SMARTFLOW_MINIO_API_PORT=9000
|
||||
SMARTFLOW_MINIO_CONSOLE_PORT=9001
|
||||
SMARTFLOW_MILVUS_PORT=19530
|
||||
SMARTFLOW_MILVUS_HEALTH_PORT=9091
|
||||
SMARTFLOW_ATTU_PORT=8000
|
||||
222
.gitea/workflows/release-offline.yml
Normal file
222
.gitea/workflows/release-offline.yml
Normal file
@@ -0,0 +1,222 @@
|
||||
name: offline-release
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
base_ref:
|
||||
description: "Optional base ref for impact diff, defaults to HEAD^"
|
||||
required: false
|
||||
include_infra:
|
||||
description: "Whether to pack infra bundle too"
|
||||
required: false
|
||||
default: "false"
|
||||
|
||||
jobs:
|
||||
build-upload:
|
||||
runs-on: local-build
|
||||
steps:
|
||||
- name: Prepare local worktree
|
||||
env:
|
||||
SMARTFLOW_REPO_URL: https://git.lecspace.com/${{ gitea.repository }}.git
|
||||
SMARTFLOW_GIT_REPO_URL: ${{ secrets.SMARTFLOW_GIT_REPO_URL }}
|
||||
SMARTFLOW_REPO_SHA: ${{ gitea.sha }}
|
||||
SMARTFLOW_GITEA_USER: ${{ secrets.SMARTFLOW_GITEA_USER }}
|
||||
SMARTFLOW_GITEA_TOKEN: ${{ secrets.SMARTFLOW_GITEA_TOKEN }}
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
function Add-GitHubEnv {
|
||||
param([string]$Line)
|
||||
$utf8NoBom = [System.Text.UTF8Encoding]::new($false)
|
||||
[System.IO.File]::AppendAllText($env:GITHUB_ENV, $Line + [Environment]::NewLine, $utf8NoBom)
|
||||
}
|
||||
|
||||
$worktreeRoot = Join-Path ([System.IO.Path]::GetTempPath()) "smartflow-actions"
|
||||
$worktree = Join-Path $worktreeRoot $env:SMARTFLOW_REPO_SHA
|
||||
$repoUrl = if ([string]::IsNullOrWhiteSpace($env:SMARTFLOW_GIT_REPO_URL)) { $env:SMARTFLOW_REPO_URL } else { $env:SMARTFLOW_GIT_REPO_URL }
|
||||
|
||||
if (Test-Path $worktree) {
|
||||
Remove-Item -LiteralPath $worktree -Recurse -Force
|
||||
}
|
||||
New-Item -ItemType Directory -Force -Path $worktreeRoot | Out-Null
|
||||
|
||||
$gitArgs = @()
|
||||
if (-not [string]::IsNullOrWhiteSpace($env:SMARTFLOW_GITEA_TOKEN)) {
|
||||
$giteaUser = $env:SMARTFLOW_GITEA_USER
|
||||
if ([string]::IsNullOrWhiteSpace($giteaUser)) { $giteaUser = "Losita" }
|
||||
$basicToken = [Convert]::ToBase64String([Text.Encoding]::ASCII.GetBytes(("{0}:{1}" -f $giteaUser, $env:SMARTFLOW_GITEA_TOKEN)))
|
||||
$gitArgs += @("-c", ("http.extraHeader=Authorization: Basic {0}" -f $basicToken))
|
||||
}
|
||||
|
||||
& git @gitArgs clone --no-checkout $repoUrl $worktree
|
||||
if ($LASTEXITCODE -ne 0) { throw "source clone failed." }
|
||||
|
||||
& git -C $worktree checkout --force $env:SMARTFLOW_REPO_SHA
|
||||
if ($LASTEXITCODE -ne 0) { throw "source checkout failed." }
|
||||
|
||||
& git -C $worktree clean -dffx
|
||||
if ($LASTEXITCODE -ne 0) { throw "source cleanup failed." }
|
||||
|
||||
$appTag = (& git -C $worktree rev-parse --short=12 HEAD).Trim()
|
||||
Add-GitHubEnv "APP_TAG=$appTag"
|
||||
Add-GitHubEnv "SMARTFLOW_WORKTREE=$worktree"
|
||||
|
||||
- name: Resolve release base
|
||||
env:
|
||||
INPUT_BASE_REF: ${{ inputs.base_ref }}
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
function Add-GitHubEnv {
|
||||
param([string]$Line)
|
||||
$utf8NoBom = [System.Text.UTF8Encoding]::new($false)
|
||||
[System.IO.File]::AppendAllText($env:GITHUB_ENV, $Line + [Environment]::NewLine, $utf8NoBom)
|
||||
}
|
||||
|
||||
Set-Location $env:SMARTFLOW_WORKTREE
|
||||
$baseRef = $env:INPUT_BASE_REF
|
||||
if ([string]::IsNullOrWhiteSpace($baseRef)) {
|
||||
& git rev-parse --verify --quiet "HEAD^" | Out-Null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
$baseRef = (& git rev-parse "HEAD^").Trim()
|
||||
}
|
||||
}
|
||||
|
||||
Add-GitHubEnv "BASE_REF=$baseRef"
|
||||
|
||||
- name: Build release plan
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
Set-Location $env:SMARTFLOW_WORKTREE
|
||||
.\deploy\impact-rules.ps1 -BaseRef $env:BASE_REF -HeadRef "HEAD" -OutputFile "deploy\release-plan.env"
|
||||
Get-Content -LiteralPath "deploy\release-plan.env"
|
||||
|
||||
- name: Pack docker images
|
||||
env:
|
||||
INPUT_INCLUDE_INFRA: ${{ inputs.include_infra }}
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
Set-Location $env:SMARTFLOW_WORKTREE
|
||||
$packArgs = @{
|
||||
AppTag = $env:APP_TAG
|
||||
PlanFile = "deploy\release-plan.env"
|
||||
}
|
||||
if ($env:INPUT_INCLUDE_INFRA -eq "true") {
|
||||
$packArgs["IncludeInfra"] = $true
|
||||
}
|
||||
& .\deploy\docker-pack.ps1 @packArgs
|
||||
|
||||
- name: Stage release directory
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
Set-Location $env:SMARTFLOW_WORKTREE
|
||||
.\deploy\stage-release.ps1 `
|
||||
-ReleaseDir ".release\$env:APP_TAG" `
|
||||
-PlanFile "deploy\release-plan.env" `
|
||||
-BundleDir ".docker-bundles"
|
||||
|
||||
- name: Upload release to server
|
||||
env:
|
||||
SMARTFLOW_RELEASE_HOST: ${{ secrets.SMARTFLOW_RELEASE_HOST }}
|
||||
SMARTFLOW_RELEASE_USER: ${{ secrets.SMARTFLOW_RELEASE_USER }}
|
||||
SMARTFLOW_RELEASE_PORT: ${{ secrets.SMARTFLOW_RELEASE_PORT }}
|
||||
SMARTFLOW_RELEASE_ROOT: ${{ secrets.SMARTFLOW_RELEASE_ROOT }}
|
||||
SMARTFLOW_SSH_KEY: ${{ secrets.SMARTFLOW_SSH_KEY }}
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
Set-StrictMode -Version Latest
|
||||
|
||||
Set-Location $env:SMARTFLOW_WORKTREE
|
||||
$hostName = $env:SMARTFLOW_RELEASE_HOST
|
||||
if ([string]::IsNullOrWhiteSpace($hostName)) { $hostName = "192.140.166.210" }
|
||||
$userName = $env:SMARTFLOW_RELEASE_USER
|
||||
if ([string]::IsNullOrWhiteSpace($userName)) { $userName = "root" }
|
||||
$port = $env:SMARTFLOW_RELEASE_PORT
|
||||
if ([string]::IsNullOrWhiteSpace($port)) { $port = "22" }
|
||||
$releaseRoot = $env:SMARTFLOW_RELEASE_ROOT
|
||||
if ([string]::IsNullOrWhiteSpace($releaseRoot)) { $releaseRoot = "/srv/smartflow/releases" }
|
||||
if ($releaseRoot -notmatch '^/srv/smartflow/releases(/.*)?$') { throw "release root must stay under /srv/smartflow/releases." }
|
||||
$remote = "{0}@{1}" -f $userName, $hostName
|
||||
$archivePath = Join-Path ([System.IO.Path]::GetTempPath()) ("smartflow-release-{0}.tgz" -f $env:APP_TAG)
|
||||
$remoteArchive = ("{0}/{1}.tgz" -f $releaseRoot.TrimEnd('/'), $env:APP_TAG)
|
||||
|
||||
if (Test-Path $archivePath) {
|
||||
Remove-Item -LiteralPath $archivePath -Force
|
||||
}
|
||||
& tar -C ".release\$env:APP_TAG" -czf $archivePath .
|
||||
if ($LASTEXITCODE -ne 0) { throw "release archive failed." }
|
||||
|
||||
$sshArgs = @("-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=30", "-p", $port)
|
||||
$scpArgs = @("-o", "BatchMode=yes", "-o", "StrictHostKeyChecking=no", "-o", "ConnectTimeout=30", "-P", $port)
|
||||
if (-not [string]::IsNullOrWhiteSpace($env:SMARTFLOW_SSH_KEY)) {
|
||||
$keyPath = Join-Path ([System.IO.Path]::GetTempPath()) ("smartflow-release-{0}.key" -f $env:APP_TAG)
|
||||
$env:SMARTFLOW_SSH_KEY.Replace("`r`n", "`n") | Out-File -FilePath $keyPath -Encoding ascii -NoNewline
|
||||
if ([System.Runtime.InteropServices.RuntimeInformation]::IsOSPlatform([System.Runtime.InteropServices.OSPlatform]::Windows)) {
|
||||
& icacls $keyPath /inheritance:r /grant:r "$($env:USERNAME):(R)" | Out-Null
|
||||
} else {
|
||||
& chmod 600 $keyPath
|
||||
}
|
||||
$sshArgs += @("-i", $keyPath)
|
||||
$scpArgs += @("-i", $keyPath)
|
||||
}
|
||||
|
||||
& ssh @sshArgs $remote "mkdir -p '$releaseRoot'"
|
||||
if ($LASTEXITCODE -ne 0) { throw "remote release root prepare failed." }
|
||||
|
||||
& scp @scpArgs $archivePath ("{0}:{1}" -f $remote, $remoteArchive)
|
||||
if ($LASTEXITCODE -ne 0) { throw "release upload failed." }
|
||||
|
||||
$remoteScript = @(
|
||||
"set -euo pipefail",
|
||||
"release_root='$releaseRoot'",
|
||||
"app_tag='$env:APP_TAG'",
|
||||
"archive='$remoteArchive'",
|
||||
"[[ -n `"`$release_root`" && `"`$app_tag`" =~ ^[0-9a-f]{12}$ ]]",
|
||||
"target=`"`$release_root/`$app_tag`"",
|
||||
"rm -rf `"`$target`"",
|
||||
"mkdir -p `"`$target`"",
|
||||
"tar -xzf `"`$archive`" -C `"`$target`"",
|
||||
"find `"`$target/deploy`" -maxdepth 1 -type f -name '*.sh' -exec chmod 755 {} +",
|
||||
"rm -f `"`$archive`""
|
||||
) -join "`n"
|
||||
|
||||
$remoteScript | ssh @sshArgs $remote "bash -s"
|
||||
if ($LASTEXITCODE -ne 0) { throw "remote release unpack failed." }
|
||||
|
||||
- name: Cleanup worktree
|
||||
if: ${{ always() }}
|
||||
shell: powershell
|
||||
run: |
|
||||
$ErrorActionPreference = "Stop"
|
||||
$worktreeRoot = Join-Path ([System.IO.Path]::GetTempPath()) "smartflow-actions"
|
||||
$expectedPrefix = $worktreeRoot.TrimEnd([System.IO.Path]::DirectorySeparatorChar, [System.IO.Path]::AltDirectorySeparatorChar) + [System.IO.Path]::DirectorySeparatorChar
|
||||
if (-not [string]::IsNullOrWhiteSpace($env:SMARTFLOW_WORKTREE) -and $env:SMARTFLOW_WORKTREE.StartsWith($expectedPrefix, [System.StringComparison]::OrdinalIgnoreCase)) {
|
||||
Remove-Item -LiteralPath $env:SMARTFLOW_WORKTREE -Recurse -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
deploy:
|
||||
runs-on: build-host
|
||||
needs: build-upload
|
||||
steps:
|
||||
- name: Trigger deploy
|
||||
env:
|
||||
SMARTFLOW_REPO_SHA: ${{ gitea.sha }}
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
app_tag="${SMARTFLOW_REPO_SHA:0:12}"
|
||||
smartflow-release deploy "${app_tag}"
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -31,3 +31,8 @@ backend/config.yaml
|
||||
.staticcheck-cache/
|
||||
.claude/
|
||||
.omc/
|
||||
/backend/.dev/
|
||||
/.docker-bundles/
|
||||
.gopath/
|
||||
/deploy/release-plan.env
|
||||
/.release/
|
||||
|
||||
729
README.md
729
README.md
@@ -62,7 +62,7 @@
|
||||
|
||||
目前暂未开放用户自定义时间尺度配置,当前仍以固定节次模型为主。后续会有更新计划的!
|
||||
|
||||
2. **导入学校课表。** 本项目后端已提供学校课表导入能力(当前主要尝试兼容CQUPT的课表格式),以便后续以课表为基底进行日程安排;前端完整导入流程入口仍在补齐。
|
||||
2. **导入学校课表。** 本项目后端已提供学校课表导入能力(当前主要尝试兼容CQUPT的课表图片识别与导入格式),前端也已在 `/schedule` 页面接入完整导入流程,便于后续直接以课表为基底进行日程安排。
|
||||
|
||||
3. **"水课"任务嵌入。** 正如上方**问题2**所言,在已导入课表的前提下,支持设置某一门你想拿来干其它事情的课为"可嵌入任务"状态,此时这门课所占据的时间区域就是可以嵌入任务的了,但是仍然有区别于其它完全空白的时间区域,便于真正安排适合在嘈杂环境下做的事情。
|
||||
|
||||
@@ -70,7 +70,7 @@
|
||||
|
||||
5. **一键编排任务。** 结合算法与用户配置,将任务基于导入的课表和任务类设置先生成预览结果;确认无误后,再正式应用到日程中。
|
||||
|
||||
6. **AI随口记与任务查询。** 正如问题4所言,当前版本支持通过AI随手记录一些大小事,也支持按象限、关键词、截止时间等维度查询任务;部分日程调整能力已接入确认流。
|
||||
6. **AI 对话与排程辅助。** 当前版本支持通过 AI 进行对话、查看历史会话与思考过程,并结合结构化工具完成排程分析、日程微调与确认流。
|
||||
|
||||
7. **多用户。** 本系统可支持多个用户同时使用,并且记录AI对话、编排任务的Token使用情况等,并进行限额。
|
||||
|
||||
@@ -160,158 +160,224 @@ PS:此图截至版本v0.3.3
|
||||
|
||||
## 3.2 核心表结构
|
||||
|
||||
其实每个表都很核心。在此展示它们的创建语句:
|
||||
以下结构以 **2026 年 5 月 7 日** 对运行中的 `smartflow-mysql` 容器执行 `SHOW TABLES`、`SHOW CREATE TABLE` 与 `information_schema.columns` 查询结果为准。
|
||||
|
||||
```sql
|
||||
CREATE TABLE `agent_chats`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int DEFAULT NULL,
|
||||
`message_content` text COMMENT '用户或AI的话',
|
||||
`role` varchar(255) DEFAULT NULL COMMENT 'user / assistant',
|
||||
`tokens_consumed` int DEFAULT '0' COMMENT '单次消耗,用于累加到 users 表',
|
||||
`created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_agent_chats_id` (`id`),
|
||||
KEY `user_id` (`user_id`),
|
||||
CONSTRAINT `agent_chats_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `courses`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int DEFAULT NULL,
|
||||
`name` varchar(255) NOT NULL,
|
||||
`location` varchar(255) DEFAULT NULL,
|
||||
`is_filler` tinyint(1) DEFAULT '0',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_courses_id` (`id`),
|
||||
KEY `user_id` (`user_id`),
|
||||
CONSTRAINT `courses_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`)
|
||||
) ENGINE = InnoDB
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `schedule_events`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int NOT NULL,
|
||||
`name` varchar(255) NOT NULL COMMENT '课程或任务名称',
|
||||
`location` varchar(255) DEFAULT '' COMMENT '地点 (教学楼/会议室)',
|
||||
`type` enum ('course','task') NOT NULL COMMENT '日程类型',
|
||||
`rel_id` int DEFAULT NULL COMMENT '关联原始数据ID (如教务系统的课程ID)',
|
||||
`can_be_embedded` tinyint(1) NOT NULL DEFAULT '0' COMMENT '是否允许在此时段嵌入其他任务',
|
||||
`created_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP,
|
||||
`updated_at` timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
|
||||
`start_time` datetime DEFAULT NULL COMMENT '任务开始的绝对时间',
|
||||
`end_time` datetime DEFAULT NULL COMMENT '任务结束的绝对时间',
|
||||
PRIMARY KEY (`id`),
|
||||
KEY `idx_user_events` (`user_id`),
|
||||
KEY `idx_user_endtime` (`user_id`, `end_time` DESC),
|
||||
CONSTRAINT `fk_event_user` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 148
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `schedules`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`event_id` int NOT NULL COMMENT '关联元数据ID',
|
||||
`user_id` int NOT NULL COMMENT '冗余UID方便直接查询',
|
||||
`week` int NOT NULL COMMENT '周次 (1-25)',
|
||||
`day_of_week` int NOT NULL COMMENT '星期 (1-7)',
|
||||
`section` int NOT NULL COMMENT '原子化节次 (1-12)',
|
||||
`embedded_task_id` int DEFAULT NULL COMMENT '若为水课嵌入,记录具体的任务项ID',
|
||||
`status` enum ('normal','interrupted') DEFAULT 'normal' COMMENT '状态: 正常/因故中断',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `idx_user_slot_atomic` (`user_id`, `week`, `day_of_week`, `section`),
|
||||
KEY `idx_event_id` (`event_id`),
|
||||
KEY `fk_embedded_task` (`embedded_task_id`),
|
||||
CONSTRAINT `fk_embedded_task` FOREIGN KEY (`embedded_task_id`) REFERENCES `task_items` (`id`) ON DELETE SET NULL,
|
||||
CONSTRAINT `fk_schedule_event` FOREIGN KEY (`event_id`) REFERENCES `schedule_events` (`id`) ON DELETE CASCADE,
|
||||
CONSTRAINT `fk_schedule_user` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`) ON DELETE CASCADE
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 214
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `task_classes`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int DEFAULT NULL,
|
||||
`name` varchar(255) DEFAULT NULL,
|
||||
`mode` enum ('auto','manual') DEFAULT NULL,
|
||||
`start_date` date DEFAULT NULL,
|
||||
`end_date` date DEFAULT NULL,
|
||||
`total_slots` int DEFAULT NULL COMMENT '分配的总节数',
|
||||
`allow_filler_course` tinyint(1) DEFAULT '1',
|
||||
`strategy` enum ('steady','rapid') DEFAULT NULL,
|
||||
`excluded_slots` json DEFAULT NULL COMMENT '不想要的时段切片',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_task_classes_id` (`id`),
|
||||
KEY `idx_task_classes_user_id` (`user_id`),
|
||||
CONSTRAINT `task_classes_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`)
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 15
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `task_items`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`category_id` int DEFAULT NULL,
|
||||
`content` text,
|
||||
`embedded_time` json DEFAULT NULL COMMENT '目标时间{date,section_from,section_to}',
|
||||
`status` int DEFAULT NULL COMMENT '1:未安排, 2:已应用',
|
||||
`order` int DEFAULT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_task_items_id` (`id`),
|
||||
KEY `task_items_ibfk_1` (`category_id`),
|
||||
CONSTRAINT `task_items_ibfk_1` FOREIGN KEY (`category_id`) REFERENCES `task_classes` (`id`) ON DELETE CASCADE
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 43
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `tasks`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`user_id` int DEFAULT NULL,
|
||||
`title` varchar(255) NOT NULL,
|
||||
`priority` int DEFAULT NULL,
|
||||
`is_completed` tinyint(1) DEFAULT '0',
|
||||
`deadline_at` timestamp NULL DEFAULT NULL,
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `uk_tasks_id` (`id`),
|
||||
KEY `idx_user_id` (`user_id`),
|
||||
CONSTRAINT `tasks_ibfk_1` FOREIGN KEY (`user_id`) REFERENCES `users` (`id`),
|
||||
CONSTRAINT `chk_priority` CHECK ((`priority` in (1, 2, 3, 4)))
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 23
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
|
||||
CREATE TABLE `users`
|
||||
(
|
||||
`id` int NOT NULL AUTO_INCREMENT,
|
||||
`username` varchar(255) NOT NULL,
|
||||
`password` varchar(255) NOT NULL,
|
||||
`phone_number` varchar(255) DEFAULT NULL,
|
||||
`token_limit` int DEFAULT '100000',
|
||||
`token_usage` int DEFAULT '0',
|
||||
`last_reset_at` timestamp NULL DEFAULT NULL COMMENT '上次周用量重置时间',
|
||||
PRIMARY KEY (`id`),
|
||||
UNIQUE KEY `username` (`username`),
|
||||
UNIQUE KEY `uk_users_id` (`id`)
|
||||
) ENGINE = InnoDB
|
||||
AUTO_INCREMENT = 4
|
||||
DEFAULT CHARSET = utf8mb4
|
||||
COLLATE = utf8mb4_0900_ai_ci
|
||||
### 3.2.0 全量表简述总览
|
||||
|
||||
| 领域 | 表名 | 简述 |
|
||||
| --- | --- | --- |
|
||||
| 用户与账户 | `users` | 用户主表,保存账号、密码、手机号与 token 配额。 |
|
||||
| 用户与账户 | `user_token_usage_adjustments` | 用户 token 用量调整流水,按事件记录增减量。 |
|
||||
| 用户与账户 | `user_notification_channels` | 用户通知通道配置表,保存 webhook、鉴权方式与最近一次测试结果。 |
|
||||
| 任务与课表 | `tasks` | 首页任务池主表,承载优先级、DDL、预计节数与紧迫阈值。 |
|
||||
| 任务与课表 | `task_classes` | 任务类定义表,保存任务类模式、策略、节数与学习属性配置。 |
|
||||
| 任务与课表 | `task_items` | 任务类下的任务块明细表,记录内容、顺序、嵌入时间与状态。 |
|
||||
| 任务与课表 | `schedule_events` | 课程 / 任务事件元数据表,是正式日程块的事实来源。 |
|
||||
| 任务与课表 | `schedules` | 节次级日程展开表,把事件落到周次、星期与节次坐标。 |
|
||||
| Agent | `agent_chats` | AI 会话头表,保存会话标题、模型、状态、消息数与 token 汇总。 |
|
||||
| Agent | `chat_histories` | AI 消息明细表,保存正文、角色、推理内容、重试分支与 token 消耗。 |
|
||||
| Agent | `agent_timeline_events` | Agent 时间线事件表,给前端时间线、状态流与调试回放使用。 |
|
||||
| Agent | `agent_schedule_states` | Agent 排程状态快照表,保存会话内的排程运行态与 revision。 |
|
||||
| Agent | `agent_state_snapshot_records` | Agent 阶段性状态快照归档表,按 phase 留存排障用 snapshot。 |
|
||||
| 主动调度 | `active_schedule_jobs` | 主动调度后台任务表,管理定时触发、扫描状态、去重键与错误信息。 |
|
||||
| 主动调度 | `active_schedule_triggers` | 主动调度触发表,记录触发来源、目标对象、幂等键、处理状态与 trace。 |
|
||||
| 主动调度 | `active_schedule_previews` | 主动调度预览结果表,保存候选方案、决策结果、风险说明与 apply 结果。 |
|
||||
| 主动调度 | `active_schedule_sessions` | 主动调度会话表,串起 trigger、preview 与对话侧 session 状态。 |
|
||||
| Memory / RAG | `memory_items` | 记忆条目主表,保存内容、置信度、重要度、敏感级别与向量状态。 |
|
||||
| Memory / RAG | `memory_jobs` | 记忆任务表,保存提取 / 管理类任务的 payload、重试与错误状态。 |
|
||||
| Memory / RAG | `memory_audit_logs` | 记忆审计日志表,记录 memory 条目的变更前后内容与操作原因。 |
|
||||
| Memory / RAG | `memory_user_settings` | 用户级记忆配置表,控制 memory 总开关与隐式 / 敏感记忆开关。 |
|
||||
| 通知 | `notification_records` | 通知发送记录表,保存触发源、摘要、兜底文案、重试状态与供应商响应。 |
|
||||
| 社区 / 任务类分享 | `forum_posts` | 社区帖子主表,对应任务类分享内容及其点赞 / 评论 / 导入统计。 |
|
||||
| 社区 / 任务类分享 | `forum_post_templates` | 帖子对应的任务类模板表,固化分享时的任务类配置快照。 |
|
||||
| 社区 / 任务类分享 | `forum_post_template_items` | 帖子模板下的任务块明细表,保存来源 task_item 与排序。 |
|
||||
| 社区 / 任务类分享 | `forum_likes` | 帖子点赞记录表,记录点赞人、作者、事件 ID 与取消状态。 |
|
||||
| 社区 / 任务类分享 | `forum_comments` | 帖子评论表,支持父子评论结构、幂等键与软删除。 |
|
||||
| 社区 / 任务类分享 | `forum_imports` | 社区模板导入记录表,记录导入目标任务类、状态与失败原因。 |
|
||||
| Credit / 计费 | `credit_accounts` | 用户 Credit 账户表,维护余额与累计充值 / 奖励 / 消耗。 |
|
||||
| Credit / 计费 | `credit_ledger` | Credit 台账流水表,记录每次变动的来源、方向、前后余额与描述。 |
|
||||
| Credit / 计费 | `credit_orders` | Credit 购买订单表,保存商品快照、数量、金额、支付状态与入账状态。 |
|
||||
| Credit / 计费 | `credit_products` | Credit 商品表,定义 SKU、额度、价格、排序与上下架状态。 |
|
||||
| Credit / 计费 | `credit_price_rules` | 模型计费规则表,定义 provider / model 的价格与利润率映射。 |
|
||||
| Credit / 计费 | `credit_reward_rules` | Credit 奖励规则表,定义奖励来源、额度、状态与配置。 |
|
||||
| Outbox | `agent_outbox_messages` | Agent 域 Outbox 表,承接聊天持久化、时间线与状态事件投递。 |
|
||||
| Outbox | `task_outbox_messages` | 任务域 Outbox 表,承接任务相关异步事件投递。 |
|
||||
| Outbox | `memory_outbox_messages` | Memory 域 Outbox 表,承接记忆提取 / 管理类异步事件投递。 |
|
||||
| Outbox | `active_scheduler_outbox_messages` | 主动调度域 Outbox 表,承接调度触发相关异步事件。 |
|
||||
| Outbox | `notification_outbox_messages` | 通知域 Outbox 表,承接外部通知发送事件。 |
|
||||
| Outbox | `taskclass_forum_outbox_messages` | 社区 / 任务类分享域 Outbox 表,承接点赞、导入等异步事件。 |
|
||||
| Outbox | `llm_outbox_messages` | LLM 域 Outbox 表,承接模型计费 / 记账等异步事件。 |
|
||||
| Outbox | `token_store_outbox_messages` | Token / Credit 域 Outbox 表,承接 token 或 credit 账务类异步事件。 |
|
||||
|
||||
当前库中与主业务链路最相关的核心表包括:
|
||||
|
||||
- `users`
|
||||
- `tasks`
|
||||
- `task_classes`
|
||||
- `task_items`
|
||||
- `schedule_events`
|
||||
- `schedules`
|
||||
- `agent_chats`
|
||||
- `chat_histories`
|
||||
|
||||
### 3.2.1 `users`
|
||||
|
||||
```text
|
||||
id bigint unsigned PK AUTO_INCREMENT
|
||||
username varchar(255) NOT NULL UNIQUE
|
||||
password varchar(255) NOT NULL
|
||||
phone_number varchar(255) NULL
|
||||
token_limit bigint DEFAULT 100000
|
||||
token_usage bigint DEFAULT 0
|
||||
last_reset_at datetime(3) NULL
|
||||
```
|
||||
|
||||
### 3.2.2 `tasks`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
user_id bigint NULL
|
||||
title varchar(255) NULL
|
||||
priority bigint NOT NULL
|
||||
is_completed tinyint(1) DEFAULT 0
|
||||
deadline_at datetime(3) NULL
|
||||
urgency_threshold_at datetime(3) NULL
|
||||
estimated_sections bigint NOT NULL DEFAULT 1
|
||||
索引:
|
||||
- idx_tasks_user_id(user_id)
|
||||
- idx_user_done_threshold_priority(user_id, is_completed, urgency_threshold_at, priority)
|
||||
```
|
||||
|
||||
### 3.2.3 `task_classes`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
user_id bigint NULL
|
||||
name varchar(255) NULL
|
||||
mode enum('auto','manual') NULL
|
||||
start_date datetime(3) NULL
|
||||
end_date datetime(3) NULL
|
||||
total_slots bigint NULL
|
||||
allow_filler_course tinyint(1) DEFAULT 1
|
||||
strategy enum('steady','rapid') NULL
|
||||
excluded_slots json NULL
|
||||
subject_type varchar(32) NULL
|
||||
difficulty_level varchar(16) NULL
|
||||
cognitive_intensity varchar(16) NULL
|
||||
excluded_days_of_week json NULL
|
||||
索引:
|
||||
- idx_task_classes_user_id(user_id)
|
||||
```
|
||||
|
||||
### 3.2.4 `task_items`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
category_id bigint NULL
|
||||
order bigint NULL
|
||||
content text NULL
|
||||
embedded_time json NULL
|
||||
status bigint NULL
|
||||
索引 / 约束:
|
||||
- fk_task_classes_items(category_id -> task_classes.id)
|
||||
```
|
||||
|
||||
### 3.2.5 `schedule_events`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
user_id bigint NOT NULL
|
||||
name varchar(255) NOT NULL
|
||||
location varchar(255) DEFAULT ''
|
||||
type enum('course','task') NOT NULL
|
||||
rel_id bigint NULL
|
||||
can_be_embedded tinyint(1) NOT NULL DEFAULT 0
|
||||
start_time datetime(3) NULL
|
||||
end_time datetime(3) NULL
|
||||
task_source_type varchar(32) NOT NULL DEFAULT ''
|
||||
makeup_for_event_id bigint NULL
|
||||
active_preview_id varchar(64) NULL
|
||||
索引:
|
||||
- idx_user_events(user_id)
|
||||
- idx_schedule_event_task_source(task_source_type)
|
||||
- idx_schedule_event_makeup_for(makeup_for_event_id)
|
||||
- idx_schedule_event_active_preview(active_preview_id)
|
||||
```
|
||||
|
||||
### 3.2.6 `schedules`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
event_id bigint NOT NULL
|
||||
user_id bigint NOT NULL
|
||||
week bigint NOT NULL
|
||||
day_of_week bigint NOT NULL
|
||||
section bigint NOT NULL
|
||||
embedded_task_id bigint NULL
|
||||
status enum('normal','interrupted') DEFAULT 'normal'
|
||||
索引 / 约束:
|
||||
- UNIQUE idx_user_slot_atomic(user_id, week, day_of_week, section)
|
||||
- idx_event_id(event_id)
|
||||
- fk_schedules_embedded_task(embedded_task_id -> task_items.id)
|
||||
- fk_schedules_event(event_id -> schedule_events.id)
|
||||
```
|
||||
|
||||
### 3.2.7 `agent_chats`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
chat_id varchar(36) NOT NULL UNIQUE
|
||||
user_id bigint NOT NULL
|
||||
title varchar(255) NULL
|
||||
system_prompt text NULL
|
||||
model varchar(100) NULL
|
||||
message_count bigint NOT NULL DEFAULT 0
|
||||
tokens_total bigint NOT NULL DEFAULT 0
|
||||
last_message_at datetime(3) NULL
|
||||
status varchar(32) NOT NULL DEFAULT 'active'
|
||||
created_at datetime(3) NULL
|
||||
updated_at datetime(3) NULL
|
||||
deleted_at datetime(3) NULL
|
||||
compaction_summary text NULL
|
||||
compaction_watermark bigint NOT NULL DEFAULT 0
|
||||
context_token_stats json NULL
|
||||
last_history_event_id varchar(64) NULL
|
||||
last_token_adjust_event_id varchar(64) NULL
|
||||
索引:
|
||||
- idx_user_last(user_id)
|
||||
- idx_user_status(user_id, status)
|
||||
```
|
||||
|
||||
### 3.2.8 `chat_histories`
|
||||
|
||||
```text
|
||||
id bigint PK AUTO_INCREMENT
|
||||
chat_id varchar(36) NOT NULL
|
||||
user_id bigint NOT NULL
|
||||
message_content text NULL
|
||||
role varchar(32) NULL
|
||||
tokens_consumed bigint NOT NULL DEFAULT 0
|
||||
created_at datetime(3) NULL
|
||||
reasoning_content text NULL
|
||||
reasoning_duration_seconds bigint NOT NULL DEFAULT 0
|
||||
retry_group_id varchar(64) NULL
|
||||
retry_index bigint NULL
|
||||
retry_from_user_message_id bigint NULL
|
||||
retry_from_assistant_message_id bigint NULL
|
||||
source_event_id varchar(64) NULL UNIQUE
|
||||
索引:
|
||||
- idx_user_chat(user_id, chat_id)
|
||||
- idx_chat_id(chat_id)
|
||||
- idx_retry_group(retry_group_id)
|
||||
```
|
||||
|
||||
补充说明:
|
||||
|
||||
1. 运行库里已经不存在 README 旧版那种“`agent_chats` 直接承载单条消息正文”的结构;现在是 `agent_chats` 管会话头,`chat_histories` 管消息明细。
|
||||
2. `task_classes.start_date` / `end_date` 在运行库中是 `datetime(3)`,不是旧文档里的 `date`。
|
||||
3. `tasks` 现在多了 `urgency_threshold_at` 与 `estimated_sections`,`task_classes` 现在多了 `subject_type`、`difficulty_level`、`cognitive_intensity`、`excluded_days_of_week`。
|
||||
4. README 此处如果后续再更新,建议继续以运行中的 MySQL 查询结果为准,而不是只看代码结构体。
|
||||
|
||||
# 4 接口契约
|
||||
|
||||
## 4.1 核心API列表(ApiFox)
|
||||
@@ -331,40 +397,42 @@ CREATE TABLE `users`
|
||||
```
|
||||
|
||||
2. 日程写工具默认走 `confirm` 确认闸门(`always_execute=true` 时可跳过确认)。
|
||||
3. 非日程写工具(如 `quick_note_create`、`query_tasks`、`web_search`、`web_fetch`)走 `continue + tool_call`。
|
||||
3. 非日程类工具(如 `context_tools_add`、`context_tools_remove`、`web_search`、`web_fetch`)走 `continue + tool_call`;其中 `upsert_task_class` 虽不依赖 `ScheduleState`,但仍属于真实写库入口。
|
||||
4. 当前每轮只允许调用一个工具,不支持同轮批量工具数组。
|
||||
|
||||
### 4.2.2 工具清单(当前版本)
|
||||
|
||||
| 工具名 | 类型 | 是否需确认 | 是否依赖 ScheduleState | 核心参数 | 作用与约束 |
|
||||
| --- | --- | --- | --- | --- | --- |
|
||||
| `get_overview` | 读 | 否 | 是 | 无 | 获取规划窗口总览(任务视角,全量返回) |
|
||||
| `query_range` | 读 | 否 | 是 | `day`(必填), `slot_start`, `slot_end` | 查询某天/某时段占用详情 |
|
||||
| `context_tools_add` | 上下文控制 | 否 | 否 | `domain`, `packs`, `mode` | 激活 `schedule` / `taskclass` 工具域;`schedule` 可按 pack 增量注入 `mutation / analyze / detail_read / deep_analyze / queue / web` |
|
||||
| `context_tools_remove` | 上下文控制 | 否 | 否 | `domain`, `packs`, `all` | 移除指定工具域或 pack;`core` 固定包不允许 remove |
|
||||
| `get_overview` | 读 | 否 | 是 | 无 | 获取当前规划窗口总览(任务视角,全量返回) |
|
||||
| `query_range` | 读 | 否 | 是 | `day`(必填), `slot_start`, `slot_end` | 查询某天 / 某时段占用详情 |
|
||||
| `query_available_slots` | 读 | 否 | 是 | `span`, `duration`, `limit`, `day_scope`, `week_filter` 等 | 查询候选空位池(纯空位优先,不足再补可嵌入位) |
|
||||
| `query_target_tasks` | 读 | 否 | 是 | `status`, `category`, `task_ids`, `enqueue` 等 | 过滤任务集合,可选自动入队供后续队列工具处理 |
|
||||
| `queue_pop_head` | 读 | 否 | 是 | 无 | 取出/复用当前队首任务(一次只处理一个) |
|
||||
| `queue_status` | 读 | 否 | 是 | 无 | 查看队列状态(pending/current/completed/skipped) |
|
||||
| `query_target_tasks` | 读 | 否 | 是 | `status`, `category`, `task_ids`, `enqueue`, `reset_queue` 等 | 过滤任务集合,可选自动入队供后续队列工具处理 |
|
||||
| `queue_pop_head` | 队列读 | 否 | 是 | 无 | 取出 / 复用当前队首任务(一次只处理一个) |
|
||||
| `queue_status` | 队列读 | 否 | 是 | 无 | 查看队列状态(pending / current / completed / skipped) |
|
||||
| `get_task_info` | 读 | 否 | 是 | `task_id`(必填) | 查询单任务详细信息 |
|
||||
| `analyze_rhythm` | 分析 | 否 | 是 | `category`, `include_pending`, `detail`, `hard_categories` | 分析学习节奏、连续同类任务与切换情况 |
|
||||
| `analyze_health` | 分析 | 否 | 是 | `detail`, `dimensions`, `threshold` | 作为主动优化裁判入口,判断当前排程是否仍值得继续优化 |
|
||||
| `place` | 写 | 是 | 是 | `task_id`, `day`, `slot_start`(均必填) | 将待安排任务预排到指定位置 |
|
||||
| `move` | 写 | 是 | 是 | `task_id`, `new_day`, `new_slot_start`(均必填) | 仅允许移动 `suggested`;`existing` 不可 `move` |
|
||||
| `swap` | 写 | 是 | 是 | `task_a`, `task_b`(均必填) | 交换两个已落位任务,要求时长一致 |
|
||||
| `batch_move` | 写 | 是 | 是 | `moves[]`(必填) | 原子批量移动,当前最多 2 条,任一冲突整批回滚 |
|
||||
| `queue_apply_head_move` | 写 | 是 | 是 | `new_day`, `new_slot_start`(均必填) | 移动当前队首并自动出队,不接受 `task_id` |
|
||||
| `queue_apply_head_move` | 队列写 | 是 | 是 | `new_day`, `new_slot_start`(均必填) | 移动当前队首并自动出队,不接受 `task_id` |
|
||||
| `queue_skip_head` | 队列控制 | 否 | 是 | `reason` | 跳过当前队首并标记 `skipped`(不改日程) |
|
||||
| `spread_even` | 写 | 是 | 是 | `task_ids`(必填,兼容 `task_id`) | 在任务集合内做均匀铺开,按筛选条件原子落地 |
|
||||
| `min_context_switch` | 写 | 是 | 是 | `task_ids`(必填,兼容 `task_id`) | 减少上下文切换重排;仅在用户明确允许打乱顺序时可执行 |
|
||||
| `unplace` | 写 | 是 | 是 | `task_id`(必填) | 取消任务落位并恢复待安排状态 |
|
||||
| `quick_note_create` | 读写混合(业务写入) | 否 | 否 | `title`(必填), `deadline_at`, `priority_group` | 记录随口记任务,支持中文相对时间;优先级可自动推断 |
|
||||
| `query_tasks` | 读 | 否 | 否 | `quadrant`, `keyword`, `deadline_before/after`, `limit` 等 | 按象限/关键词/时间边界查询任务 |
|
||||
| `web_search` | 读 | 否 | 否 | `query`(必填), `top_k`, `domain_allow`, `recency_days` | Web 检索,返回结构化标题/摘要/URL;未启用时优雅返回错误 observation |
|
||||
| `upsert_task_class` | 任务类写入 | 是 | 否 | `id`, `task_class`, `items`, `source` | 统一创建 / 更新任务类入口;会写库,但不直接修改当前日程预览 |
|
||||
| `web_search` | 读 | 否 | 否 | `query`(必填), `top_k`, `domain_allow`, `recency_days` | Web 检索,返回结构化标题 / 摘要 / URL;未启用时优雅返回错误 observation |
|
||||
| `web_fetch` | 读 | 否 | 否 | `url`(必填), `max_chars` | 抓取并清洗网页正文;服务不可用时优雅返回错误 observation |
|
||||
|
||||
### 4.2.3 当前实现中的关键规则
|
||||
|
||||
1. `min_context_switch` 有顺序护栏:未授权“允许打乱顺序”会被后端拦截并返回拒绝结果。
|
||||
2. `batch_move` 有安全上限:当前最多支持 2 条移动请求,超出建议走队列化逐项处理。
|
||||
3. `quick_note_create` 和 `query_tasks` 不依赖 `ScheduleState`,由执行层注入 `_user_id` 后可直接调用。
|
||||
4. `web_search`/`web_fetch` 失败不会打断主链路,都会回传结构化错误 observation 给模型继续决策。
|
||||
1. `context_tools_add` / `context_tools_remove` 是动态区协议入口:`schedule` 支持按 pack 精细注入,`taskclass` 当前只有 `core` 固定包。
|
||||
2. 日程写工具默认走 `confirm` 确认闸门(`always_execute=true` 时可跳过)。
|
||||
3. `upsert_task_class` 虽不依赖 `ScheduleState`,但属于真实写库入口,仍要求 confirm。
|
||||
4. `batch_move` 有安全上限:当前最多支持 2 条移动请求,超出建议走队列化逐项处理。
|
||||
5. `web_search` / `web_fetch` 失败不会打断主链路,都会回传结构化错误 observation 给模型继续决策。
|
||||
|
||||
|
||||
# 5 后端实现
|
||||
@@ -373,22 +441,169 @@ CREATE TABLE `users`
|
||||
|
||||
| **分类** | **选用技术** | **在时伴中的应用场景** |
|
||||
| ----------------- | ---------------- | ------------------------------------------------------------ |
|
||||
| **Web 框架** | **Gin** | 负责全站 API 的路由分发,处理任务增删改查及智能排程的请求。 |
|
||||
| **持久层数据库** | **MySQL 8.0** | 存储用户、任务、课表及日程运行图(Schedules)的核心数据。 |
|
||||
| **ORM 框架** | **GORM** | 用于简化 Go 与数据库的交互,利用事务处理 `Apply` 接口的原子性操作。 |
|
||||
| **高性能缓存** | **Redis** | 缓存用户的周日程视图(避免频繁扫表)、存储 Token 临时限额、实现分布式锁防止重复排程。 |
|
||||
| **消息队列** | **Outbox + Kafka** | **可靠异步解耦**:请求主链路先写 Outbox,后台再投递 Kafka 并消费落库,既降低首字延迟又避免消息瞬时丢失。 |
|
||||
| **AI 编排框架** | **Eino** | 作为 AI Agent 的大脑,根据排程策略(Steady/Rapid)计算任务与水课的嵌入逻辑。 |
|
||||
| **身份认证** | **JWT** | 实现无状态登录,将 `user_id` 封装在 Token 中,确保数据的用户隔离。 |
|
||||
| **配置管理** | **Viper** | 管理数据库、Redis、Kafka 的连接参数,支持多环境(开发/生产)切换。 |
|
||||
| **API 文档/调试** | **Apifox** | 维护接口协议,进行前后端联调及自动化测试。 |
|
||||
| **日志监控** | **Zap / Logrus** | 记录系统运行状态,特别是 Kafka 消费失败或 AI 接口超时的错误日志。 |
|
||||
| **API 网关** | **Gin** | 负责统一 HTTP 入口、JWT 鉴权、限流、幂等控制与前端 API 聚合。 |
|
||||
| **服务间通信** | **go-zero zrpc + gRPC** | `api` 网关通过 zrpc 调用 `userauth / task / schedule / agent / memory / llm` 等后端服务,支撑当前多服务拆分。 |
|
||||
| **持久层数据库** | **MySQL 8.0** | 存储用户、任务、课表、Agent 会话、主动调度、社区、Credit、memory 等核心结构化数据。 |
|
||||
| **ORM / 数据访问** | **GORM** | 负责 MySQL 映射、事务处理、索引约束落地,以及日程 / Agent / memory 等域的数据访问。 |
|
||||
| **缓存与运行态存储** | **Redis** | 缓存周课表与会话状态,承接幂等键、限流、Agent 状态快照、日程预览与 memory 预取上下文。 |
|
||||
| **异步事件总线** | **Outbox + Kafka(segmentio/kafka-go)** | 用于聊天持久化、memory 抽取、主动调度触发、通知投递、Credit 记账等异步事件解耦。 |
|
||||
| **AI / Agent 编排** | **CloudWeGo Eino** | 承担对话规划、工具调用、确认流、排程执行与 deliver/interrupt 状态机编排。 |
|
||||
| **模型接入** | **Volcengine Ark + OpenAI Compatible 接口** | `llm` / `course` / `memory` / `agent` 等服务通过统一模型层调用文本模型、推理模型与课表识别视觉模型。 |
|
||||
| **向量检索** | **Milvus** | 为长期记忆与 RAG 检索提供向量索引、召回与相似度搜索能力。 |
|
||||
| **向量依赖** | **MinIO + etcd** | 作为 Milvus 的对象存储与元数据依赖,随容器编排一起部署。 |
|
||||
| **身份认证** | **JWT** | 实现无状态登录,并把 `user_id` 等身份信息透传到 API 与服务层。 |
|
||||
| **配置管理** | **Viper** | 管理数据库、Redis、Kafka、RPC、模型、memory/RAG、通知等多环境配置。 |
|
||||
| **容器化交付** | **Dockerfile + Docker Compose + Nginx** | 已支持基础设施容器化与整站容器化;前端镜像通过 Nginx 托管静态产物。 |
|
||||
| **接口调试 / 文档** | **Apifox** | 用于维护接口协议、联调接口与沉淀阶段性 API 文档。 |
|
||||
| **日志与观测** | **标准库 `log` + Gin Logger/Recovery** | 当前代码以标准库日志为主,HTTP 层使用 Gin 默认日志与恢复中间件;Kafka、RAG、memory、Agent 链路也会输出关键观测日志。 |
|
||||
|
||||
## 5.2 架构图
|
||||
|
||||
PS:截至v0.3.3。其中黑色箭头为请求数据链路,绿色箭头为返回数据,虚线箭头为控制流。
|
||||
基于当前 `docker-compose.full.yml`、`backend/cmd/*` 与各服务 `dao/connect.go` 整理。实线表示同步 HTTP / RPC / 直连存储关系,虚线表示 Outbox + Kafka 异步事件链路;标注“迁移期”的连线表示该服务仍在直接读写其他域的表。
|
||||
|
||||

|
||||
```mermaid
|
||||
flowchart TB
|
||||
FE["Frontend (Vue)"] --> API["api / Gin Gateway<br/>JWT 鉴权 / 限流 / 幂等"]
|
||||
|
||||
subgraph SVC["服务层"]
|
||||
direction LR
|
||||
UA["userauth"]
|
||||
TASK["task"]
|
||||
COURSE["course"]
|
||||
TCLASS["task-class"]
|
||||
SCH["schedule"]
|
||||
AGENT["agent"]
|
||||
MEM["memory"]
|
||||
AS["active-scheduler"]
|
||||
NOTI["notification"]
|
||||
FORUM["taskclassforum"]
|
||||
TOKEN["tokenstore"]
|
||||
LLM["llm"]
|
||||
RAG["RAG / Milvus"]
|
||||
end
|
||||
|
||||
API --> UA
|
||||
API --> TASK
|
||||
API --> COURSE
|
||||
API --> TCLASS
|
||||
API --> SCH
|
||||
API --> AGENT
|
||||
API --> MEM
|
||||
API --> AS
|
||||
API --> NOTI
|
||||
API --> FORUM
|
||||
API --> TOKEN
|
||||
|
||||
AGENT --> LLM
|
||||
AGENT --> TASK
|
||||
AGENT --> TCLASS
|
||||
AGENT --> SCH
|
||||
AGENT --> MEM
|
||||
AGENT --> RAG
|
||||
COURSE --> LLM
|
||||
MEM --> LLM
|
||||
MEM --> RAG
|
||||
AS --> LLM
|
||||
AS --> TASK
|
||||
AS --> SCH
|
||||
FORUM --> TCLASS
|
||||
LLM --> TOKEN
|
||||
|
||||
subgraph REDIS["Redis"]
|
||||
direction TB
|
||||
R_API["限流 / 幂等响应缓存"]
|
||||
R_AUTH["JWT 黑名单 / Token 配额快照"]
|
||||
R_TASK["任务列表缓存 / 紧急性提升去重锁"]
|
||||
R_SCH["今日日程 / 周视图 / 最近完成 / 当前进行中缓存"]
|
||||
R_TCLASS["任务集列表缓存"]
|
||||
R_AGENT["会话历史 / Timeline / Schedule Preview / Agent State / Memory Prefetch"]
|
||||
R_FORUM["评论树缓存"]
|
||||
R_CREDIT["Credit 余额快照 / Blocked 标记"]
|
||||
end
|
||||
|
||||
API --> R_API
|
||||
UA --> R_AUTH
|
||||
TASK --> R_TASK
|
||||
SCH --> R_SCH
|
||||
TCLASS --> R_TCLASS
|
||||
AGENT --> R_AGENT
|
||||
FORUM --> R_FORUM
|
||||
TOKEN --> R_CREDIT
|
||||
LLM --> R_CREDIT
|
||||
|
||||
subgraph MYSQL["MySQL"]
|
||||
direction TB
|
||||
T_USERS["users<br/>user_token_usage_adjustments"]
|
||||
T_TASK["tasks"]
|
||||
T_TCLASS["task_classes<br/>task_items"]
|
||||
T_SCH["schedules<br/>schedule_events"]
|
||||
T_AGENT["agent_chats<br/>chat_histories<br/>agent_timeline_events<br/>agent_schedule_states<br/>agent_state_snapshot_records"]
|
||||
T_AS["active_schedule_jobs<br/>active_schedule_triggers<br/>active_schedule_previews<br/>active_schedule_sessions"]
|
||||
T_MEM["memory_items<br/>memory_jobs<br/>memory_audit_logs<br/>memory_user_settings"]
|
||||
T_NOTI["notification_records<br/>user_notification_channels"]
|
||||
T_FORUM["forum_posts<br/>forum_post_templates<br/>forum_post_template_items<br/>forum_likes<br/>forum_comments<br/>forum_imports"]
|
||||
T_CREDIT["credit_accounts<br/>credit_ledger<br/>credit_products<br/>credit_orders<br/>credit_price_rules<br/>credit_reward_rules"]
|
||||
|
||||
OB_AGENT["agent_outbox_messages"]
|
||||
OB_TASK["task_outbox_messages"]
|
||||
OB_MEM["memory_outbox_messages"]
|
||||
OB_AS["active_scheduler_outbox_messages"]
|
||||
OB_NOTI["notification_outbox_messages"]
|
||||
OB_TOKEN["token_store_outbox_messages"]
|
||||
end
|
||||
|
||||
UA --> T_USERS
|
||||
TASK --> T_TASK
|
||||
TCLASS --> T_TCLASS
|
||||
TCLASS -.->|迁移期直写| T_SCH
|
||||
SCH --> T_SCH
|
||||
SCH -.->|迁移期依赖| T_TASK
|
||||
SCH -.->|迁移期依赖| T_TCLASS
|
||||
COURSE -.->|课表导入写入| T_SCH
|
||||
AGENT --> T_AGENT
|
||||
AGENT -.->|读取任务| T_TASK
|
||||
AGENT -.->|读取任务集| T_TCLASS
|
||||
AGENT -.->|读取日程 / 预览| T_SCH
|
||||
AS --> T_AS
|
||||
AS -.->|读取会话与时间线| T_AGENT
|
||||
MEM --> T_MEM
|
||||
NOTI --> T_NOTI
|
||||
FORUM --> T_FORUM
|
||||
TOKEN --> T_CREDIT
|
||||
LLM -.->|读取价格规则 / Credit 守卫| T_CREDIT
|
||||
|
||||
subgraph KAFKA["Outbox + Kafka"]
|
||||
direction TB
|
||||
K_AGENT["smartflow.agent.outbox"]
|
||||
K_TASK["smartflow.task.outbox"]
|
||||
K_MEM["smartflow.memory.outbox"]
|
||||
K_AS["smartflow.active-scheduler.outbox"]
|
||||
K_NOTI["smartflow.notification.outbox"]
|
||||
K_TOKEN["smartflow.token-store.outbox"]
|
||||
end
|
||||
|
||||
AGENT -.->|聊天持久化 / 状态快照 / Timeline 持久化| OB_AGENT
|
||||
AGENT -.->|memory.extract.requested| OB_MEM
|
||||
TASK -.->|task.urgency.promote.requested| OB_TASK
|
||||
AS -.->|active_schedule.triggered| OB_AS
|
||||
AS -.->|notification.feishu.requested| OB_NOTI
|
||||
FORUM -.->|forum.post.liked / forum.post.imported| OB_TOKEN
|
||||
LLM -.->|credit.charge.requested| OB_TOKEN
|
||||
|
||||
OB_AGENT -.->|relay| K_AGENT
|
||||
OB_TASK -.->|relay| K_TASK
|
||||
OB_MEM -.->|relay| K_MEM
|
||||
OB_AS -.->|relay| K_AS
|
||||
OB_NOTI -.->|relay| K_NOTI
|
||||
OB_TOKEN -.->|relay| K_TOKEN
|
||||
|
||||
K_AGENT -.->|consume| AGENT
|
||||
K_TASK -.->|consume| TASK
|
||||
K_MEM -.->|consume| MEM
|
||||
K_AS -.->|consume| AS
|
||||
K_NOTI -.->|consume| NOTI
|
||||
K_TOKEN -.->|consume| TOKEN
|
||||
```
|
||||
|
||||
## 5.3 核心算法
|
||||
|
||||
@@ -686,12 +901,12 @@ frontend/src
|
||||
│ ├─ assistant/ # 上下文窗口、排程结果卡片、微调弹窗、任务类选择器
|
||||
│ ├─ common/ # 全局主侧边栏 MainSidebar
|
||||
│ ├─ dashboard/ # 首页卡片与 AssistantPanel
|
||||
│ └─ schedule/ # 任务类侧栏、周课表画板、创建弹窗
|
||||
│ └─ schedule/ # 任务类侧栏、周课表画板、创建弹窗、课表导入弹窗
|
||||
├─ router/ # 路由定义与守卫
|
||||
├─ stores/ # Pinia store(当前主要是 auth)
|
||||
├─ types/ # dashboard / schedule / api 类型定义
|
||||
├─ utils/ # 日期、Markdown、HTTP 错误、幂等 key 等工具
|
||||
├─ views/ # Auth / Dashboard / Assistant / Schedule / Prototype
|
||||
├─ views/ # Home / Auth / Dashboard / Assistant / Schedule / Forum / Store / PlanDetail / debug
|
||||
├─ App.vue # 全局布局壳层与 router-view 容器
|
||||
└─ main.ts # Vue / Pinia / Router / Element Plus 挂载入口
|
||||
```
|
||||
@@ -702,8 +917,8 @@ frontend/src
|
||||
2. 本地开发通过 Vite 代理把 `/api` 转发到 `http://127.0.0.1:8080`。
|
||||
3. 常规接口统一走 `frontend/src/api/http.ts`,内置 `401 -> refresh token -> 原请求重放`。
|
||||
4. 对话流接口 `POST /api/v1/agent/chat` 单独走原生 `fetch`,并在前端手动处理一次 refresh token 重试。
|
||||
5. 写操作尽量补 `X-Idempotency-Key`,当前任务类创建、日程应用、日程删除、任务块删除都已接入。
|
||||
6. `App.vue` 会对 `/dashboard`、`/assistant`、`/schedule` 统一套用主壳层与 `MainSidebar`,而不是每个页面各自维护一套侧边栏。
|
||||
5. 写操作尽量补 `X-Idempotency-Key`,当前任务类创建 / 更新、课表导入、日程应用、日程删除、任务块删除都已接入。
|
||||
6. `App.vue` 会对 `/dashboard`、`/assistant`、`/schedule`、`/forum`、`/store` 与 `/forum/:id` 统一套用主壳层与 `MainSidebar`,而不是每个页面各自维护一套侧边栏。
|
||||
|
||||
## 6.2 当前页面与路由状态
|
||||
|
||||
@@ -711,18 +926,23 @@ frontend/src
|
||||
|
||||
| 路由 | 页面状态 | 说明 |
|
||||
| --- | --- | --- |
|
||||
| `/` | 已完成 | 默认重定向到 `/dashboard` |
|
||||
| `/` | 已完成 | 独立 Home 落地页;CTA 会根据登录态跳转 `/auth` 或 `/dashboard` |
|
||||
| `/auth` | 已完成 | 登录/注册同页切换,支持 `redirect` 回跳 |
|
||||
| `/dashboard` | 已完成 | 首页工作台,承接四象限任务与今日日程 |
|
||||
| `/assistant` | 已完成 | `newAgent` 对话主入口,支持时间线、确认卡片、排程结果卡片 |
|
||||
| `/schedule` | 已完成 | 传统课表中心,支持任务类、粗排、拖拽预览与正式应用 |
|
||||
| `/prototype/tool-trace` | 原型页 | 用于展示工具 trace / 阻断 / 排程卡片交互原型,不在主导航中暴露 |
|
||||
| `/assistant/:id?` | 已完成 | `newAgent` 对话主入口,支持按会话 ID 直达历史会话 |
|
||||
| `/schedule` | 已完成 | 传统课表中心,支持任务类、课表导入、粗排、拖拽预览与正式应用 |
|
||||
| `/forum` | 已接路由 | 社区 / 方案广场入口,不在首页工作台内重复承载 |
|
||||
| `/forum/:id` | 已接路由 | 方案详情页,沿用主壳层与登录态守卫 |
|
||||
| `/store` | 已接路由 | 商店页,沿用主壳层与登录态守卫 |
|
||||
| `/debug/tool-card` | 调试页 | 单卡片调试页,不在主导航中暴露 |
|
||||
| `/debug/tool-cards` | 调试页 | 工具卡片集合调试页,不在主导航中暴露 |
|
||||
| `/debug/assistant/:id?` | 调试页 | 助手调试页,不在主导航中暴露 |
|
||||
|
||||
当前仍未接出正式独立路由的入口:
|
||||
当前仍未纳入正式主导航 / 独立业务入口的部分:
|
||||
|
||||
1. “任务”独立页仍未落地,侧边栏没有 `/task` 路由。
|
||||
2. 侧边栏底部“设置”按钮仍是视觉占位,尚未接出 `/settings`。
|
||||
3. `src/views/layout`、`src/views/login`、`src/views/register`、`src/views/settings` 以及 `src/store/` 目前更偏预留/历史残留目录,不承载当前主运行链路。
|
||||
3. 调试页已经接出正式路由,但仍只用于内部联调,不纳入正式主导航。
|
||||
|
||||
## 6.3 认证页 `/auth`
|
||||
|
||||
@@ -736,7 +956,7 @@ frontend/src
|
||||
|
||||
1. 登录与注册共用一页,通过 tab 切换。
|
||||
2. 登录成功后会把 `access_token`、`refresh_token` 与最近一次用户名写入 `localStorage`。
|
||||
3. 路由守卫会阻止未登录用户进入 `/dashboard`、`/assistant`、`/schedule`。
|
||||
3. 路由守卫会阻止未登录用户进入 `/dashboard`、`/assistant`、`/schedule`、`/forum`、`/forum/:id` 与 `/store`。
|
||||
4. 普通 JSON 接口走 Axios 自动续签;流式对话接口走 `fetch` 时也会在前端手动尝试一次 refresh token 重试。
|
||||
5. 登出时会先尽力调用后端注销接口,再无条件清理本地登录态,避免前端出现“假在线”。
|
||||
|
||||
@@ -752,13 +972,13 @@ frontend/src
|
||||
|
||||
当前已实现能力:
|
||||
|
||||
1. `/dashboard`、`/assistant`、`/schedule` 已统一挂在同一套全局壳层下,由 `App.vue + MainSidebar` 负责外层布局。
|
||||
2. 侧边栏当前只保留“总览 / 日程 / 助手”三项主导航,避免过早把尚未做完的页面入口暴露出来。
|
||||
1. `/dashboard`、`/assistant`、`/schedule`、`/forum`、`/store` 与 `/forum/:id` 已统一挂在同一套全局壳层下,由 `App.vue + MainSidebar` 负责外层布局。
|
||||
2. 侧边栏当前提供“总览 / 日程 / 助手 / 社区 / 商店”五项主导航;底部“设置”按钮仍是视觉占位。
|
||||
3. 首页中心区展示四象限任务卡片,支持获取任务列表、创建任务、完成任务、撤销完成任务。
|
||||
4. 右侧展示“今日日程”,通过 `GET /api/v1/schedule/today` 拉取当天事件。
|
||||
5. 首页顶部已具备退出登录、用户昵称展示、当前日期展示等基础工作台能力。
|
||||
6. 首页整体做了缩放适配,目标是在常见笔记本分辨率下尽量完整展示主要内容,而不是依赖用户手动缩放浏览器。
|
||||
7. “课表导入”入口目前仍是占位提示,还没有真正接出导入向导页。
|
||||
7. 课表导入入口已经收敛到 `/schedule` 页工具栏,通过弹窗完成图片识别、确认与正式导入。
|
||||
|
||||
## 6.5 AI 对话页 `/assistant`
|
||||
|
||||
@@ -812,16 +1032,19 @@ frontend/src
|
||||
|
||||
1. `/schedule` 仍然是一套独立于 `newAgent` 对话页的传统编排中心,主要承接任务类管理、粗排预览与正式应用。
|
||||
2. 左侧为任务类侧栏,右侧为周课表/排程画板,支持单选与批量多选两种任务类操作模式。
|
||||
3. 任务类侧栏支持获取任务类列表、展开详情、删除任务块、新建任务类弹窗,以及长列表滚动。
|
||||
3. 任务类侧栏支持获取任务类列表、展开详情、删除任务块、新建 / 更新任务类弹窗;右侧工具栏已接入课表图片导入对话框。
|
||||
4. 周课表支持周次切换,当前前端将请求范围限制在 `1 ~ 24` 周,并且加入请求序列号保护,快速切周时只认最后一次响应。
|
||||
5. 已接通的课表/编排相关接口包括:
|
||||
- `GET /api/v1/schedule/week`
|
||||
- `GET /api/v1/task-class/list`
|
||||
- `GET /api/v1/task-class/get`
|
||||
- `POST /api/v1/task-class/add`
|
||||
- `PUT /api/v1/task-class/update`
|
||||
- `DELETE /api/v1/task-class/delete-item`
|
||||
- `GET /api/v1/schedule/smart-planning`
|
||||
- `POST /api/v1/schedule/smart-planning-multi`
|
||||
- `POST /api/v1/course/parse-image`
|
||||
- `POST /api/v1/course/import`
|
||||
- `PUT /api/v1/task-class/apply-batch-into-schedule`
|
||||
- `DELETE /api/v1/schedule/delete`
|
||||
6. 智能编排结果当前分为单任务类粗排和多任务类批量粗排,结果先进入前端运行时预览态,而不会立即写入正式课表。
|
||||
@@ -833,32 +1056,38 @@ frontend/src
|
||||
|
||||
## 6.7 当前前后端衔接边界
|
||||
|
||||
当前前端已经覆盖的主业务链路:
|
||||
当前前端已经覆盖的主业务链路与调试入口:
|
||||
|
||||
1. 登录 / 注册 / 自动续签 / 安全登出。
|
||||
2. 首页四象限任务获取、创建、完成、撤销与今日日程展示。
|
||||
3. `newAgent` 对话、历史会话、思考内容展示、消息重试、结构化时间线、确认覆盖层、上下文窗口计量。
|
||||
4. AI 对话页中的排程结果卡片、结构化预览拉取、弹窗微调、暂存到 Redis 状态、正式应用到课表。
|
||||
5. 传统 `/schedule` 页面中的任务类管理、智能粗排、批量粗排、拖拽预览、正式应用、删除日程。
|
||||
6. `/prototype/tool-trace` 原型页,用于展示工具 trace 与交互形态。
|
||||
5. 传统 `/schedule` 页面中的任务类管理、课表导入、智能粗排、批量粗排、拖拽预览、正式应用、删除日程。
|
||||
6. `/debug/tool-card`、`/debug/tool-cards` 与 `/debug/assistant/:id?` 调试页,用于展示工具卡片与助手调试交互形态。
|
||||
|
||||
当前仍明确留给后续迭代的部分:
|
||||
|
||||
1. “任务”独立页面与“设置”独立页面尚未接出。
|
||||
2. AI 输入区已经预留附件上传按钮,但上传、解析、落盘、发送给模型的完整链路尚未接通。
|
||||
3. `/assistant` 与 `/schedule` 目前还是两套并行入口,状态模型与交互语义尚未完全统一。
|
||||
4. 课表导入流程入口虽然已预留,但还没有完整的导入页与导入向导。
|
||||
5. 用户消息“修改后原地替换旧消息”的真正后端语义尚未实现,目前仍按“复制到输入框后再发送一条新消息”处理。
|
||||
6. 原型页、预留目录和历史壳层代码还未进一步收敛清理,说明前端仍处在快速迭代期。
|
||||
4. 用户消息“修改后原地替换旧消息”的真正后端语义尚未实现,目前仍按“复制到输入框后再发送一条新消息”处理。
|
||||
5. 调试页、预留目录和历史壳层代码还未进一步收敛清理,说明前端仍处在快速迭代期。
|
||||
|
||||
# 7 部署与监控
|
||||
|
||||
## 7.1 容器化部署方案
|
||||
当前项目已经具备一套**“依赖栈容器化 + 应用进程宿主机运行”**的落地方案,适合本地联调、答辩演示和单机部署。
|
||||
当前项目已经同时提供两档容器化形态:
|
||||
|
||||
1. **基础设施容器化**:使用 `docker-compose.yml` 只拉起 MySQL / Redis / Kafka / Milvus 等依赖栈,适合本地开发与脚本托管后端。
|
||||
2. **整站容器化**:使用 `docker-compose.full.yml` 连同后端多服务与前端一起拉起,适合单机演示、联调和离线交付。
|
||||
|
||||
### 当前部署形态
|
||||
|
||||
当前根目录已经提供 `docker-compose.yml`,用于启动以下基础设施:
|
||||
当前根目录已经同时提供 `docker-compose.yml` 与 `docker-compose.full.yml`:
|
||||
|
||||
### 方案 A:基础设施容器化(本地开发)
|
||||
|
||||
`docker-compose.yml` 用于启动以下基础设施:
|
||||
|
||||
| 服务 | 端口 | 用途 |
|
||||
| --- | --- | --- |
|
||||
@@ -869,64 +1098,39 @@ frontend/src
|
||||
| MinIO | `9000` / `9001` | Milvus 对象存储依赖 |
|
||||
| Milvus Standalone | `19530` / `9091` | RAG / memory 向量检索引擎 |
|
||||
| Attu | `8000` | Milvus 可视化管理台 |
|
||||
| kafka-init | 无外部端口 | 启动时自动创建 `smartflow.agent.outbox` topic |
|
||||
| kafka-init | 无外部端口 | 启动时自动创建 `smartflow.*.outbox` 相关 topic |
|
||||
|
||||
其中,`docker-compose.yml` 已经为 MySQL、Redis、Kafka、etcd、MinIO、Milvus 配好了 `healthcheck`,并通过 `depends_on.condition: service_healthy` 保证依赖按健康状态顺序启动。
|
||||
|
||||
### 推荐启动顺序
|
||||
### 方案 B:整站容器化(单机部署 / 演示)
|
||||
|
||||
1. 先启动依赖栈:
|
||||
`docker-compose.full.yml` 会在上述基础设施之上,继续拉起:
|
||||
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
1. 后端服务:`userauth`、`notification`、`active-scheduler`、`schedule`、`task`、`task-class`、`course`、`memory`、`agent`、`taskclassforum`、`tokenstore`、`llm`、`api`
|
||||
2. 前端服务:`frontend`
|
||||
|
||||
2. 准备后端配置文件:
|
||||
对应镜像来源如下:
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
cp config.example.yaml config.yaml
|
||||
```
|
||||
1. `backend/Dockerfile`:统一构建后端多服务二进制,运行时镜像默认以 `api` 为入口,可被 `docker-compose.full.yml` 复用为整套后端服务。
|
||||
2. `frontend/Dockerfile`:构建 Vite 产物并通过 Nginx 提供静态站点服务。
|
||||
3. `deploy/docker-pack.ps1`:可在 Windows 环境打出 `smartflow/backend-suite` 与 `smartflow/frontend` 镜像包。
|
||||
4. `deploy/docker-load.sh`:可在目标机器批量导入镜像 tar 包。
|
||||
|
||||
然后按实际环境修改以下配置:
|
||||
### 推荐使用方式
|
||||
|
||||
1. `database`:MySQL 地址、用户名、密码、库名。
|
||||
2. `redis`:Redis 地址、密码。
|
||||
3. `kafka`:Broker 地址与 topic / groupID。
|
||||
4. `jwt`:`accessSecret` 与 `refreshSecret`。
|
||||
5. `agent`:模型名、`baseURL`、推理开关。
|
||||
6. `rag` / `memory`:Milvus 地址、embedding 配置、memory 模式。
|
||||
7. `websearch`:联网搜索 provider 与 API Key。
|
||||
1. **本地开发**:先执行 `docker compose up -d` 拉起 `docker-compose.yml` 里的依赖栈,然后按第 8 章的脚本流启动后端与前端开发环境。
|
||||
2. **整站部署 / 演示**:先构建或导入应用镜像,再执行 `docker compose -f docker-compose.full.yml up -d` 拉起完整站点。
|
||||
|
||||
3. 启动后端:
|
||||
整站容器化默认暴露:
|
||||
|
||||
```bash
|
||||
cd backend
|
||||
go run .
|
||||
```
|
||||
|
||||
后端默认监听 `8080`,并提供健康检查接口:
|
||||
|
||||
```text
|
||||
GET /api/v1/health
|
||||
```
|
||||
|
||||
4. 启动前端:
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
前端默认运行在 `5173`,并通过 Vite 代理把 `/api` 转发到 `http://127.0.0.1:8080`。
|
||||
1. `api`:`8080`
|
||||
2. `frontend`:`80`
|
||||
|
||||
### 当前方案的边界
|
||||
|
||||
1. **已容器化的部分**:MySQL、Redis、Kafka、Milvus 及其依赖。
|
||||
2. **未容器化的部分**:Go 后端进程与 Vue 前端进程当前仍以宿主机方式运行,仓库中尚未提供后端/前端 Dockerfile。
|
||||
3. **因此更准确的表述**:当前项目已经完成“基础设施容器化”,但还没有做到“整站一键镜像化部署”。
|
||||
4. **如果后续继续工程化**:优先补 `backend/Dockerfile`、`frontend/Dockerfile` 与生产态反向代理配置,再把前后端服务一并纳入 compose 编排。
|
||||
1. **已具备完整应用层容器化能力**:仓库已提供后端 / 前端 Dockerfile 与 full-stack compose,旧版 README 中“应用层尚未容器化”的表述已不再适用。
|
||||
2. **当前更偏单机编排**:`docker-compose.full.yml` 适合开发、演示与单机交付,尚未展开到多实例调度、灰度发布、集中式日志平台等更重的生产治理能力。
|
||||
3. **本地开发入口已切换**:仓库根 `go run .` 现在只是兼容壳入口;当前推荐的本地后端启动方式以 `backend/scripts/*.ps1` 为准,避免和第 8 章冲突。
|
||||
|
||||
## 7.2 性能监控&统计
|
||||
当前项目已经接入了一套**轻量级、以日志和内存计数器为主的观测方案**,但还没有完整接入 Prometheus / Grafana 这类统一监控平台。
|
||||
@@ -984,7 +1188,34 @@ npm run dev
|
||||
|
||||
# 8 快速开始
|
||||
|
||||
## 8.1 启动前端开发环境
|
||||
## 8.1 后端本地快速启动
|
||||
|
||||
后端开发统一使用 `backend` 根目录下的 PowerShell 启动脚本,不再维护 `cmd/all` 聚合入口。
|
||||
|
||||
```powershell
|
||||
cd backend
|
||||
.\scripts\dev-up.ps1
|
||||
.\scripts\services-up.ps1
|
||||
.\scripts\dev-status.ps1
|
||||
.\scripts\dev-logs.ps1 -Service api -Stream stdout -Follow
|
||||
.\scripts\service-restart.ps1 -Service api
|
||||
.\scripts\services-down.ps1
|
||||
.\scripts\dev-down.ps1
|
||||
.\scripts\dev-down.ps1 -StopInfra
|
||||
```
|
||||
|
||||
说明:
|
||||
|
||||
1. 所有后端脚本统一收敛在 `backend/scripts` 目录下。
|
||||
2. `scripts/dev-up.ps1` 会先确保 Docker 基础设施就绪,再按顺序构建并拉起全部 RPC 服务与 API。
|
||||
3. `scripts/services-up.ps1` 只拉起后端服务本身,不触碰 Docker 基础设施。
|
||||
4. `scripts/dev-status.ps1` 用于查看各服务是脚本托管、外部运行还是未启动。
|
||||
5. `scripts/dev-logs.ps1` 用于查看单个服务最新日志;可选 `-Stream stdout|stderr|both`,带 `-Follow` 可持续追日志。
|
||||
6. `scripts/service-restart.ps1 -Service <name>` 用于重启单个脚本托管的后端服务;若该服务由外部进程托管,则会直接拒绝操作。
|
||||
7. `scripts/services-down.ps1` 只停止脚本托管的后端服务进程。
|
||||
8. `scripts/dev-down.ps1` 默认只停止脚本托管的后端进程;加 `-StopInfra` 才会一并停止 Docker 基础设施。
|
||||
|
||||
## 8.2 启动前端开发环境
|
||||
|
||||
前端目录在 `frontend/`,本地开发步骤如下:
|
||||
|
||||
@@ -1000,7 +1231,7 @@ npm run dev
|
||||
2. 开发代理目标:`http://127.0.0.1:8080`
|
||||
3. 因此前端本地联调前,需要先确保后端服务已经启动在 `8080`
|
||||
|
||||
## 8.2 前端生产构建
|
||||
## 8.3 前端生产构建
|
||||
|
||||
```bash
|
||||
cd frontend
|
||||
@@ -1013,7 +1244,7 @@ npm run preview
|
||||
1. `npm run build` 会先执行 `vue-tsc -b` 做类型检查,再执行 `vite build`。
|
||||
2. 当前构建是可通过的;但由于主包仍然偏大,Vite 会给出 chunk size warning,这属于现阶段可接受状态。
|
||||
|
||||
## 8.3 建议的前后端联调顺序
|
||||
## 8.4 建议的前后端联调顺序
|
||||
|
||||
建议按下面顺序启动和验证:
|
||||
|
||||
|
||||
5
backend/.dockerignore
Normal file
5
backend/.dockerignore
Normal file
@@ -0,0 +1,5 @@
|
||||
.dev/
|
||||
.gocache/
|
||||
.gopath/
|
||||
bin/
|
||||
config.yaml
|
||||
67
backend/Dockerfile
Normal file
67
backend/Dockerfile
Normal file
@@ -0,0 +1,67 @@
|
||||
# syntax=docker/dockerfile:1.7

ARG GO_IMAGE=golang:1.25-bookworm
ARG RUNTIME_IMAGE=debian:bookworm-slim

FROM ${GO_IMAGE} AS builder

WORKDIR /src/backend

COPY go.mod go.sum ./
RUN go mod download

COPY . .

ARG TARGETOS=linux
ARG TARGETARCH=amd64

FROM builder AS suite-builder

# 1. Build every deployable backend service binary in one stage so a single
#    Dockerfile serves the whole suite; output is fixed at /out so the runtime
#    stage can reuse one artifact set.
# 2. `|| exit 1` aborts on the FIRST failed build. Without it the shell `for`
#    loop only propagates the exit status of the LAST iteration, so an early
#    broken service could silently ship in the image.
RUN --mount=type=cache,target=/root/.cache/go-build \
    mkdir -p /out && \
    for service in userauth notification active-scheduler schedule task task-class course memory agent taskclassforum tokenstore llm api; do \
        CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o "/out/${service}" "./cmd/${service}" || exit 1; \
    done

FROM builder AS service-builder

ARG SERVICE=api

# 1. Service-level image compiles a single entry point to keep per-service
#    publish artifacts small.
# 2. SERVICE must match a directory under backend/cmd; a wrong value fails the
#    build instead of publishing a broken image.
RUN --mount=type=cache,target=/root/.cache/go-build \
    mkdir -p /out && \
    CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o "/out/${SERVICE}" "./cmd/${SERVICE}"

FROM ${RUNTIME_IMAGE} AS runtime-base

WORKDIR /app/backend

# 1. Runtime keeps only CA certificates and tzdata so HTTPS requests and the
#    Asia/Shanghai timezone work.
# 2. A container config template is baked in; deployments may still override it
#    with a mounted file.
RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates tzdata \
    && rm -rf /var/lib/apt/lists/*

COPY config.docker.yaml /app/backend/config.docker.yaml

ENV TZ=Asia/Shanghai
ENV SMARTFLOW_CONFIG_FILE=/app/backend/config.docker.yaml

FROM runtime-base AS runtime-suite

COPY --from=suite-builder /out /app/bin

CMD ["/app/bin/api"]

FROM runtime-base AS runtime-service

ARG SERVICE=api

# Copy the selected binary to a FIXED path. Exec-form CMD does not expand build
# args at container runtime, so a `CMD ["/app/bin/api"]` here would point at a
# missing file whenever SERVICE != api.
COPY --from=service-builder /out/${SERVICE} /app/bin/service

CMD ["/app/bin/service"]

FROM runtime-suite AS runtime
|
||||
@@ -1,25 +0,0 @@
|
||||
package bootstrap
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// LoadConfig 统一加载后端进程配置。
|
||||
// 职责边界:
|
||||
// 1. 只负责把 config.yaml 读入 viper,不解释具体业务配置语义;
|
||||
// 2. 同时兼容从仓库根目录和 backend 目录启动的两种路径;
|
||||
// 3. 失败时返回 error,由各进程入口决定是否退出。
|
||||
func LoadConfig() error {
|
||||
viper.SetConfigName("config")
|
||||
viper.SetConfigType("yaml")
|
||||
viper.AddConfigPath(".")
|
||||
viper.AddConfigPath("backend")
|
||||
if err := viper.ReadInConfig(); err != nil {
|
||||
return fmt.Errorf("failed to read config file: %w", err)
|
||||
}
|
||||
log.Println("Config loaded successfully")
|
||||
return nil
|
||||
}
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -14,8 +14,9 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEndpoint = "127.0.0.1:9087"
|
||||
defaultTimeout = 10 * time.Second
|
||||
defaultEndpoint = "127.0.0.1:9087"
|
||||
// 课表导入可能一次展开大量周次与节次,RPC 默认超时与网关保持一致,避免内层先被截断。
|
||||
defaultTimeout = 5 * time.Minute
|
||||
defaultMaxRPCMessageSize = 8 * 1024 * 1024
|
||||
rpcMessageSizePadding = 1024 * 1024
|
||||
)
|
||||
@@ -91,6 +92,7 @@ func (c *Client) ImportCourses(ctx context.Context, req coursecontracts.UserImpo
|
||||
|
||||
func (c *Client) ParseCourseTableImage(ctx context.Context, req coursecontracts.CourseImageParseRequest) (json.RawMessage, error) {
|
||||
resp, err := c.rpc.ParseCourseImage(ctx, &coursepb.CourseImageRequest{
|
||||
UserId: uint64(req.UserID),
|
||||
Filename: req.Filename,
|
||||
MimeType: req.MIMEType,
|
||||
ImageBytes: req.ImageBytes,
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
301
backend/client/llm/client.go
Normal file
301
backend/client/llm/client.go
Normal file
@@ -0,0 +1,301 @@
|
||||
package llm
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
llmrpc "github.com/LoveLosita/smartflow/backend/services/llm/rpc"
|
||||
llmcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/llm"
|
||||
"github.com/cloudwego/eino/schema"
|
||||
"github.com/zeromicro/go-zero/zrpc"
|
||||
)
|
||||
|
||||
const (
	// defaultEndpoint is the conventional local address of the LLM RPC service.
	defaultEndpoint = "127.0.0.1:9096"
	// defaultTimeout of 0 means no client-level RPC timeout; deadlines are
	// left to each call's context (streaming chats can run for a long time).
	defaultTimeout = 0
	// defaultPingTimeout bounds the startup connectivity probe (see NewClient).
	defaultPingTimeout = 2 * time.Second
)

// ClientConfig describes how to reach the standalone LLM RPC service.
type ClientConfig struct {
	// Endpoints are direct host:port addresses, passed through to zrpc.
	Endpoints []string
	// Target is a zrpc resolver target string, passed through to zrpc.
	Target string
	// Timeout is the per-call RPC timeout; negative values fall back to
	// defaultTimeout (i.e. no timeout).
	Timeout time.Duration
}

// ServiceConfig extends ClientConfig with the vision model name used for
// course-table image parsing when building the service facade.
type ServiceConfig struct {
	ClientConfig
	CourseVisionModel string
}

// Client is the minimal RPC adapter that business processes use to reach the
// standalone LLM service.
type Client struct {
	rpc llmrpc.LLMClient
}
|
||||
|
||||
func NewClient(cfg ClientConfig) (*Client, error) {
|
||||
timeout := cfg.Timeout
|
||||
if timeout < 0 {
|
||||
timeout = defaultTimeout
|
||||
}
|
||||
endpoints := normalizeEndpoints(cfg.Endpoints)
|
||||
target := strings.TrimSpace(cfg.Target)
|
||||
if len(endpoints) == 0 && target == "" {
|
||||
endpoints = []string{defaultEndpoint}
|
||||
}
|
||||
|
||||
zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
|
||||
Endpoints: endpoints,
|
||||
Target: target,
|
||||
NonBlock: true,
|
||||
Timeout: int64(timeout / time.Millisecond),
|
||||
}, zrpc.WithDialOption(llmrpc.JSONCodecDialOption()))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client := &Client{rpc: llmrpc.NewLLMClient(zclient.Conn())}
|
||||
if err = client.ping(resolvePingTimeout(timeout)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client, nil
|
||||
}
|
||||
|
||||
// NewService 一次性把远端 LLM RPC 包装回旧的 *llmservice.Service 门面。
|
||||
func NewService(cfg ServiceConfig) (*llmservice.Service, error) {
|
||||
client, err := NewClient(cfg.ClientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.BuildService(cfg.CourseVisionModel), nil
|
||||
}
|
||||
|
||||
func (c *Client) BuildService(courseVisionModel string) *llmservice.Service {
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return llmservice.NewWithClients(llmservice.StaticClients{
|
||||
Lite: buildTextClient(c, llmcontracts.ModelAliasLite),
|
||||
Pro: buildTextClient(c, llmcontracts.ModelAliasPro),
|
||||
Max: buildTextClient(c, llmcontracts.ModelAliasMax),
|
||||
CourseImageResponses: llmservice.NewArkResponsesClientWithFunc(courseVisionModel, func(ctx context.Context, messages []llmservice.ArkResponsesMessage, options llmservice.ArkResponsesOptions) (*llmservice.ArkResponsesResult, error) {
|
||||
return c.GenerateResponsesText(ctx, llmcontracts.ModelAliasCourseImageResponses, messages, options)
|
||||
}),
|
||||
})
|
||||
}
|
||||
|
||||
func (c *Client) Ping(ctx context.Context) error {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err := c.rpc.Ping(ctx, &llmcontracts.PingRequest{})
|
||||
return responseFromRPCError(err)
|
||||
}
|
||||
|
||||
func (c *Client) GenerateText(ctx context.Context, modelAlias string, messages []*schema.Message, options llmservice.GenerateOptions) (*llmservice.TextResult, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.rpc.GenerateText(ctx, &llmcontracts.TextRequest{
|
||||
ModelAlias: modelAlias,
|
||||
Messages: messages,
|
||||
Options: toContractGenerateOptions(options),
|
||||
Billing: billingFromContext(ctx, modelAlias),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil || resp.Result == nil {
|
||||
return nil, errors.New("llm zrpc service returned empty text response")
|
||||
}
|
||||
return &llmservice.TextResult{
|
||||
Text: resp.Result.Text,
|
||||
Usage: llmservice.CloneUsage(resp.Result.Usage),
|
||||
FinishReason: resp.Result.FinishReason,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) StreamText(ctx context.Context, modelAlias string, messages []*schema.Message, options llmservice.GenerateOptions) (llmservice.StreamReader, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stream, err := c.rpc.StreamText(ctx, &llmcontracts.StreamTextRequest{
|
||||
ModelAlias: modelAlias,
|
||||
Messages: messages,
|
||||
Options: toContractGenerateOptions(options),
|
||||
Billing: billingFromContext(ctx, modelAlias),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
return &streamReader{stream: stream}, nil
|
||||
}
|
||||
|
||||
func (c *Client) GenerateResponsesText(ctx context.Context, modelAlias string, messages []llmservice.ArkResponsesMessage, options llmservice.ArkResponsesOptions) (*llmservice.ArkResponsesResult, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := c.rpc.GenerateResponsesText(ctx, &llmcontracts.ResponsesRequest{
|
||||
ModelAlias: modelAlias,
|
||||
Messages: toContractResponsesMessages(messages),
|
||||
Options: toContractResponsesOptions(options),
|
||||
Billing: billingFromContext(ctx, modelAlias),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil || resp.Result == nil {
|
||||
return nil, errors.New("llm zrpc service returned empty responses response")
|
||||
}
|
||||
return toServiceResponsesResult(resp.Result), nil
|
||||
}
|
||||
|
||||
func (c *Client) ensureReady() error {
|
||||
if c == nil || c.rpc == nil {
|
||||
return errors.New("llm zrpc client is not initialized")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ping runs Ping under a bounded, self-owned context; it is only used during
// construction in NewClient to fail fast on an unreachable service.
func (c *Client) ping(timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return c.Ping(ctx)
}
|
||||
|
||||
type streamReader struct {
|
||||
stream llmrpc.LLM_StreamTextClient
|
||||
}
|
||||
|
||||
func (r *streamReader) Recv() (*schema.Message, error) {
|
||||
if r == nil || r.stream == nil {
|
||||
return nil, errors.New("llm zrpc stream is not initialized")
|
||||
}
|
||||
|
||||
chunk, err := r.stream.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if chunk == nil {
|
||||
return nil, errors.New("llm zrpc service returned empty stream chunk")
|
||||
}
|
||||
return chunk.Message, nil
|
||||
}
|
||||
|
||||
func (r *streamReader) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func buildTextClient(remote *Client, modelAlias string) *llmservice.Client {
|
||||
return llmservice.NewClient(
|
||||
func(ctx context.Context, messages []*schema.Message, options llmservice.GenerateOptions) (*llmservice.TextResult, error) {
|
||||
return remote.GenerateText(ctx, modelAlias, messages, options)
|
||||
},
|
||||
func(ctx context.Context, messages []*schema.Message, options llmservice.GenerateOptions) (llmservice.StreamReader, error) {
|
||||
return remote.StreamText(ctx, modelAlias, messages, options)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
func billingFromContext(ctx context.Context, modelAlias string) *llmcontracts.BillingContext {
|
||||
billing, ok := llmservice.BillingContextFromContext(ctx)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if strings.TrimSpace(billing.ModelAlias) == "" {
|
||||
billing.ModelAlias = strings.TrimSpace(modelAlias)
|
||||
}
|
||||
return &llmcontracts.BillingContext{
|
||||
UserID: billing.UserID,
|
||||
EventID: billing.EventID,
|
||||
Scene: billing.Scene,
|
||||
RequestID: billing.RequestID,
|
||||
ConversationID: billing.ConversationID,
|
||||
ModelAlias: billing.ModelAlias,
|
||||
SkipCharge: billing.SkipCharge,
|
||||
}
|
||||
}
|
||||
|
||||
// toContractGenerateOptions maps service-layer generate options onto the
// contracts type; Thinking is carried over as its plain string form.
func toContractGenerateOptions(input llmservice.GenerateOptions) llmcontracts.GenerateOptions {
	return llmcontracts.GenerateOptions{
		Temperature: input.Temperature,
		MaxTokens:   input.MaxTokens,
		Thinking:    string(input.Thinking),
		Metadata:    input.Metadata,
	}
}
|
||||
|
||||
func toContractResponsesMessages(input []llmservice.ArkResponsesMessage) []llmcontracts.ResponsesMessage {
|
||||
if len(input) == 0 {
|
||||
return nil
|
||||
}
|
||||
output := make([]llmcontracts.ResponsesMessage, 0, len(input))
|
||||
for _, item := range input {
|
||||
output = append(output, llmcontracts.ResponsesMessage{
|
||||
Role: item.Role,
|
||||
Text: item.Text,
|
||||
ImageURL: item.ImageURL,
|
||||
ImageDetail: item.ImageDetail,
|
||||
})
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// toContractResponsesOptions maps service-layer Ark responses options onto
// the contracts type; Thinking is carried over as its plain string form.
func toContractResponsesOptions(input llmservice.ArkResponsesOptions) llmcontracts.ResponsesOptions {
	return llmcontracts.ResponsesOptions{
		Model:           input.Model,
		Temperature:     input.Temperature,
		MaxOutputTokens: input.MaxOutputTokens,
		Thinking:        string(input.Thinking),
		TextFormat:      input.TextFormat,
	}
}
|
||||
|
||||
// toServiceResponsesResult converts a contracts responses result back into
// the service-layer type. A nil input yields nil; Usage is copied only when
// present so a missing usage block stays nil on the output side.
func toServiceResponsesResult(result *llmcontracts.ResponsesResult) *llmservice.ArkResponsesResult {
	if result == nil {
		return nil
	}
	output := &llmservice.ArkResponsesResult{
		Text:             result.Text,
		Status:           result.Status,
		IncompleteReason: result.IncompleteReason,
		ErrorCode:        result.ErrorCode,
		ErrorMessage:     result.ErrorMessage,
	}
	if result.Usage != nil {
		output.Usage = &llmservice.ArkResponsesUsage{
			InputTokens:  result.Usage.InputTokens,
			OutputTokens: result.Usage.OutputTokens,
			TotalTokens:  result.Usage.TotalTokens,
		}
	}
	return output
}
|
||||
|
||||
// normalizeEndpoints trims whitespace from each endpoint and drops entries
// that end up empty, preserving the original order.
func normalizeEndpoints(values []string) []string {
	endpoints := make([]string, 0, len(values))
	for _, raw := range values {
		if cleaned := strings.TrimSpace(raw); cleaned != "" {
			endpoints = append(endpoints, cleaned)
		}
	}
	return endpoints
}
|
||||
|
||||
// resolvePingTimeout returns the configured timeout when it is positive and
// shorter than defaultPingTimeout; otherwise it falls back to the default.
// NOTE(review): a configured timeout larger than defaultPingTimeout is
// silently capped — confirm callers never need a longer ping window.
func resolvePingTimeout(timeout time.Duration) time.Duration {
	if timeout > 0 && timeout < defaultPingTimeout {
		return timeout
	}
	return defaultPingTimeout
}
|
||||
73
backend/client/llm/errors.go
Normal file
73
backend/client/llm/errors.go
Normal file
@@ -0,0 +1,73 @@
|
||||
package llm
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// responseFromRPCError converts a gRPC error from the llm zrpc service into
// a plain error suitable for the HTTP layer.
//
// Mapping:
//   - non-status errors are wrapped as transport failures;
//   - client-fault codes (InvalidArgument/ResourceExhausted/FailedPrecondition)
//     become bare errors, preferring the ErrorInfo-derived message when present;
//   - server-side codes (Internal/Unknown/Unavailable/DeadlineExceeded/
//     DataLoss/Unimplemented) are wrapped so they read as internal failures;
//   - everything else becomes a bare error with the status message.
func responseFromRPCError(err error) error {
	if err == nil {
		return nil
	}

	st, ok := status.FromError(err)
	if !ok {
		return wrapRPCError(err)
	}
	// Prefer the richer message recovered from ErrorInfo details for
	// client-fault codes; fall through to the generic mapping otherwise.
	if message, ok := messageFromStatus(st); ok {
		switch st.Code() {
		case codes.InvalidArgument, codes.ResourceExhausted, codes.FailedPrecondition:
			return errors.New(message)
		}
	}

	switch st.Code() {
	case codes.InvalidArgument, codes.ResourceExhausted, codes.FailedPrecondition:
		return errors.New(strings.TrimSpace(st.Message()))
	case codes.Internal, codes.Unknown, codes.Unavailable, codes.DeadlineExceeded, codes.DataLoss, codes.Unimplemented:
		msg := strings.TrimSpace(st.Message())
		if msg == "" {
			msg = "llm zrpc service internal error"
		}
		return wrapRPCError(errors.New(msg))
	default:
		msg := strings.TrimSpace(st.Message())
		if msg == "" {
			msg = "llm zrpc service rejected request"
		}
		return errors.New(msg)
	}
}
|
||||
|
||||
func messageFromStatus(st *status.Status) (string, bool) {
|
||||
if st == nil {
|
||||
return "", false
|
||||
}
|
||||
for _, detail := range st.Details() {
|
||||
info, ok := detail.(*errdetails.ErrorInfo)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
message := strings.TrimSpace(st.Message())
|
||||
if message == "" && info.Metadata != nil {
|
||||
message = strings.TrimSpace(info.Metadata["info"])
|
||||
}
|
||||
if message == "" {
|
||||
message = strings.TrimSpace(info.Reason)
|
||||
}
|
||||
return message, message != ""
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
func wrapRPCError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("调用 llm zrpc 服务失败: %w", err)
|
||||
}
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
470
backend/client/taskclassforum/client.go
Normal file
470
backend/client/taskclassforum/client.go
Normal file
@@ -0,0 +1,470 @@
|
||||
package taskclassforum
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/taskclassforum/rpc/pb"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclassforum"
|
||||
"github.com/zeromicro/go-zero/zrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEndpoint = "127.0.0.1:9090"
|
||||
defaultTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// ClientConfig carries connection settings for the taskclassforum zrpc client.
type ClientConfig struct {
	// Endpoints is a static list of service addresses; blank entries are dropped.
	Endpoints []string
	// Target is an alternative zrpc target string (see zrpc.RpcClientConf).
	Target string
	// Timeout bounds each RPC; non-positive values fall back to defaultTimeout.
	Timeout time.Duration
}
|
||||
|
||||
// Client is the gateway-side adapter for the plan-forum (taskclassforum) zrpc service.
//
// Responsibility boundaries:
//  1. It only translates between the HTTP gateway and the taskclassforum zrpc protocol;
//  2. It never touches the forum_* tables or the legacy TaskClass tables directly — all business rules live in the taskclassforum service;
//  3. gRPC business errors are unwrapped here back into respond.Response so the HTTP layer can return them uniformly.
type Client struct {
	rpc pb.TaskClassForumServiceClient
}
|
||||
|
||||
func NewClient(cfg ClientConfig) (*Client, error) {
|
||||
timeout := cfg.Timeout
|
||||
if timeout <= 0 {
|
||||
timeout = defaultTimeout
|
||||
}
|
||||
endpoints := normalizeEndpoints(cfg.Endpoints)
|
||||
target := strings.TrimSpace(cfg.Target)
|
||||
if len(endpoints) == 0 && target == "" {
|
||||
endpoints = []string{defaultEndpoint}
|
||||
}
|
||||
|
||||
zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
|
||||
Endpoints: endpoints,
|
||||
Target: target,
|
||||
NonBlock: true,
|
||||
Timeout: int64(timeout / time.Millisecond),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Client{rpc: pb.NewTaskClassForumServiceClient(zclient.Conn())}, nil
|
||||
}
|
||||
|
||||
// ListPosts fetches one page of forum posts for actorUserID, filtered by
// keyword/tag and ordered by sort. RPC errors are translated through
// responseFromRPCError.
func (c *Client) ListPosts(ctx context.Context, actorUserID uint64, page int, pageSize int, sort string, keyword string, tag string) ([]contracts.ForumPostBrief, contracts.PageResult, error) {
	if err := c.ensureReady(); err != nil {
		return nil, contracts.PageResult{}, err
	}
	resp, err := c.rpc.ListPosts(ctx, &pb.ListForumPostsRequest{
		ActorUserId: actorUserID,
		Page:        int32(page),
		PageSize:    int32(pageSize),
		Sort:        sort,
		Keyword:     keyword,
		Tag:         tag,
	})
	if err != nil {
		return nil, contracts.PageResult{}, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, contracts.PageResult{}, errors.New("taskclassforum zrpc service returned empty list posts response")
	}
	return forumPostBriefsFromPB(resp.Items), pageFromPB(resp.Page), nil
}
|
||||
|
||||
// ListTags returns up to limit forum tags visible to actorUserID.
func (c *Client) ListTags(ctx context.Context, actorUserID uint64, limit int) ([]contracts.ForumTagItem, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.ListTags(ctx, &pb.ListForumTagsRequest{
		ActorUserId: actorUserID,
		Limit:       int32(limit),
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty list tags response")
	}
	return forumTagItemsFromPB(resp.Items), nil
}
|
||||
|
||||
// CreatePost publishes a task class as a forum post. Tags are copied before
// being handed to the request so the caller's slice is not aliased. The
// IdempotencyKey lets the service deduplicate retried submissions.
func (c *Client) CreatePost(ctx context.Context, req contracts.CreateForumPostRequest) (*contracts.ForumPostBrief, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.CreatePost(ctx, &pb.CreateForumPostRequest{
		ActorUserId:    req.ActorUserID,
		TaskClassId:    req.TaskClassID,
		Title:          req.Title,
		Summary:        req.Summary,
		Tags:           append([]string(nil), req.Tags...),
		IdempotencyKey: req.IdempotencyKey,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty create post response")
	}
	post := forumPostBriefFromPB(resp.Post)
	return &post, nil
}
|
||||
|
||||
// GetPost fetches the full detail (post plus template) of a single forum post
// as seen by actorUserID.
func (c *Client) GetPost(ctx context.Context, actorUserID uint64, postID uint64) (*contracts.ForumPostDetail, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.GetPost(ctx, &pb.GetForumPostRequest{
		ActorUserId: actorUserID,
		PostId:      postID,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty get post response")
	}
	data := forumPostDetailFromPB(resp.Data)
	return &data, nil
}
|
||||
|
||||
// LikePost records a like by actorUserID on postID and returns the updated
// counters together with the viewer's new state.
func (c *Client) LikePost(ctx context.Context, actorUserID uint64, postID uint64) (contracts.ForumPostCounters, contracts.ForumPostViewerState, error) {
	if err := c.ensureReady(); err != nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, err
	}
	resp, err := c.rpc.LikePost(ctx, &pb.LikeForumPostRequest{
		ActorUserId: actorUserID,
		PostId:      postID,
	})
	if err != nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, responseFromRPCError(err)
	}
	if resp == nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, errors.New("taskclassforum zrpc service returned empty like response")
	}
	return forumPostCountersFromPB(resp.Counters), forumPostViewerStateFromPB(resp.ViewerState), nil
}
|
||||
|
||||
// UnlikePost removes actorUserID's like on postID and returns the updated
// counters together with the viewer's new state.
func (c *Client) UnlikePost(ctx context.Context, actorUserID uint64, postID uint64) (contracts.ForumPostCounters, contracts.ForumPostViewerState, error) {
	if err := c.ensureReady(); err != nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, err
	}
	resp, err := c.rpc.UnlikePost(ctx, &pb.UnlikeForumPostRequest{
		ActorUserId: actorUserID,
		PostId:      postID,
	})
	if err != nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, responseFromRPCError(err)
	}
	if resp == nil {
		return contracts.ForumPostCounters{}, contracts.ForumPostViewerState{}, errors.New("taskclassforum zrpc service returned empty unlike response")
	}
	return forumPostCountersFromPB(resp.Counters), forumPostViewerStateFromPB(resp.ViewerState), nil
}
|
||||
|
||||
// ListComments fetches one page of comment trees for postID, ordered by sort,
// on behalf of actorUserID.
func (c *Client) ListComments(ctx context.Context, actorUserID uint64, postID uint64, page int, pageSize int, sort string) ([]contracts.ForumCommentNode, contracts.PageResult, error) {
	if err := c.ensureReady(); err != nil {
		return nil, contracts.PageResult{}, err
	}
	resp, err := c.rpc.ListComments(ctx, &pb.ListForumCommentsRequest{
		ActorUserId: actorUserID,
		PostId:      postID,
		Page:        int32(page),
		PageSize:    int32(pageSize),
		Sort:        sort,
	})
	if err != nil {
		return nil, contracts.PageResult{}, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, contracts.PageResult{}, errors.New("taskclassforum zrpc service returned empty list comments response")
	}
	return forumCommentNodesFromPB(resp.Items), pageFromPB(resp.Page), nil
}
|
||||
|
||||
// CreateComment posts a comment (or a reply, when ParentCommentID is set) on
// a forum post. A nil ParentCommentID is sent as 0, which the service reads
// as "top-level comment".
func (c *Client) CreateComment(ctx context.Context, req contracts.CreateForumCommentRequest) (*contracts.ForumCommentNode, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.CreateComment(ctx, &pb.CreateForumCommentRequest{
		ActorUserId:     req.ActorUserID,
		PostId:          req.PostID,
		Content:         req.Content,
		ParentCommentId: uint64FromPtr(req.ParentCommentID),
		IdempotencyKey:  req.IdempotencyKey,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty create comment response")
	}
	comment := forumCommentNodeFromPB(resp.Comment)
	return &comment, nil
}
|
||||
|
||||
// DeleteComment deletes (or soft-deletes, per resp.Status) a comment owned by
// actorUserID and returns the resulting state with an emptied Content.
func (c *Client) DeleteComment(ctx context.Context, actorUserID uint64, commentID uint64) (*contracts.DeleteForumCommentResult, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.DeleteComment(ctx, &pb.DeleteForumCommentRequest{
		ActorUserId: actorUserID,
		CommentId:   commentID,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty delete comment response")
	}
	// NOTE(review): the deletion timestamp is stamped with the gateway's
	// clock rather than taken from the RPC response — presumably the pb
	// response carries no deleted_at field; confirm, and confirm callers
	// tolerate the clock skew versus the service's own record.
	deletedAt := time.Now().Format(time.RFC3339)
	return &contracts.DeleteForumCommentResult{
		CommentID: resp.CommentId,
		Status:    resp.Status,
		Content:   "",
		DeletedAt: &deletedAt,
	}, nil
}
|
||||
|
||||
// ImportPost copies the task-class template behind a forum post into the
// actor's own library, optionally renaming it via TargetTitle. The
// IdempotencyKey lets the service deduplicate retried imports.
func (c *Client) ImportPost(ctx context.Context, req contracts.ImportForumPostRequest) (*contracts.ImportForumPostResult, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.ImportPost(ctx, &pb.ImportForumPostRequest{
		ActorUserId:    req.ActorUserID,
		PostId:         req.PostID,
		TargetTitle:    req.TargetTitle,
		IdempotencyKey: req.IdempotencyKey,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("taskclassforum zrpc service returned empty import post response")
	}
	return &contracts.ImportForumPostResult{
		ImportID:       resp.ImportId,
		PostID:         resp.PostId,
		NewTaskClassID: resp.NewTaskClassId,
		TaskClassTitle: resp.TaskClassTitle,
		ImportCount:    resp.ImportCount,
		CreatedAt:      resp.CreatedAt,
	}, nil
}
|
||||
|
||||
func (c *Client) ensureReady() error {
|
||||
if c == nil || c.rpc == nil {
|
||||
return errors.New("taskclassforum zrpc client is not initialized")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// normalizeEndpoints strips surrounding whitespace from every endpoint and
// discards entries that become empty, keeping the input order.
func normalizeEndpoints(values []string) []string {
	out := make([]string, 0, len(values))
	for _, candidate := range values {
		candidate = strings.TrimSpace(candidate)
		if candidate == "" {
			continue
		}
		out = append(out, candidate)
	}
	return out
}
|
||||
|
||||
// pageFromPB converts a pb page descriptor into the contracts type; a nil
// input yields the zero value.
func pageFromPB(page *pb.PageResponse) contracts.PageResult {
	if page == nil {
		return contracts.PageResult{}
	}
	return contracts.PageResult{
		Page:     int(page.Page),
		PageSize: int(page.PageSize),
		Total:    int(page.Total),
		HasMore:  page.HasMore,
	}
}
|
||||
|
||||
// forumUserFromPB converts a pb user brief into the contracts type; a nil
// input yields the zero value.
func forumUserFromPB(user *pb.UserBrief) contracts.UserBrief {
	if user == nil {
		return contracts.UserBrief{}
	}
	return contracts.UserBrief{
		UserID:    user.UserId,
		Nickname:  user.Nickname,
		AvatarURL: user.AvatarUrl,
	}
}
|
||||
|
||||
// forumTemplateSummaryFromPB converts a pb template summary into the
// contracts type; StrategyLabels is copied so the pb slice is not aliased.
func forumTemplateSummaryFromPB(summary *pb.TemplateSummary) contracts.TemplateSummary {
	if summary == nil {
		return contracts.TemplateSummary{}
	}
	return contracts.TemplateSummary{
		TaskCount:      int(summary.TaskCount),
		Mode:           summary.Mode,
		StartDate:      summary.StartDate,
		EndDate:        summary.EndDate,
		StrategyLabels: append([]string(nil), summary.StrategyLabels...),
	}
}
|
||||
|
||||
// forumPostCountersFromPB converts pb post counters into the contracts type;
// a nil input yields the zero value.
func forumPostCountersFromPB(counters *pb.ForumPostCounters) contracts.ForumPostCounters {
	if counters == nil {
		return contracts.ForumPostCounters{}
	}
	return contracts.ForumPostCounters{
		LikeCount:    counters.LikeCount,
		CommentCount: counters.CommentCount,
		ImportCount:  counters.ImportCount,
	}
}
|
||||
|
||||
// forumPostViewerStateFromPB converts the viewer's per-post state from pb
// into the contracts type; a nil input yields the zero value.
func forumPostViewerStateFromPB(state *pb.ForumPostViewerState) contracts.ForumPostViewerState {
	if state == nil {
		return contracts.ForumPostViewerState{}
	}
	return contracts.ForumPostViewerState{
		Liked:        state.Liked,
		ImportedOnce: state.ImportedOnce,
	}
}
|
||||
|
||||
// forumPostBriefFromPB converts a pb post brief into the contracts type,
// delegating nested messages to their dedicated converters. Tags is copied
// so the pb slice is not aliased; a nil input yields the zero value.
func forumPostBriefFromPB(post *pb.ForumPostBrief) contracts.ForumPostBrief {
	if post == nil {
		return contracts.ForumPostBrief{}
	}
	return contracts.ForumPostBrief{
		PostID:          post.PostId,
		Title:           post.Title,
		Summary:         post.Summary,
		Tags:            append([]string(nil), post.Tags...),
		Author:          forumUserFromPB(post.Author),
		TemplateSummary: forumTemplateSummaryFromPB(post.TemplateSummary),
		Counters:        forumPostCountersFromPB(post.Counters),
		ViewerState:     forumPostViewerStateFromPB(post.ViewerState),
		Status:          post.Status,
		CreatedAt:       post.CreatedAt,
	}
}
|
||||
|
||||
func forumPostBriefsFromPB(items []*pb.ForumPostBrief) []contracts.ForumPostBrief {
|
||||
if len(items) == 0 {
|
||||
return []contracts.ForumPostBrief{}
|
||||
}
|
||||
result := make([]contracts.ForumPostBrief, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, forumPostBriefFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// forumTemplateDetailFromPB converts a pb template detail into the contracts
// type. Nil preview entries are skipped; a nil detail yields the zero value.
func forumTemplateDetailFromPB(detail *pb.TemplateDetail) contracts.TemplateDetail {
	if detail == nil {
		return contracts.TemplateDetail{}
	}
	items := make([]contracts.TemplateItemPreview, 0, len(detail.ItemsPreview))
	for _, item := range detail.ItemsPreview {
		if item == nil {
			continue
		}
		items = append(items, contracts.TemplateItemPreview{
			ItemID:  item.ItemId,
			Order:   int(item.Order),
			Content: item.Content,
		})
	}
	return contracts.TemplateDetail{
		Mode:           detail.Mode,
		StartDate:      detail.StartDate,
		EndDate:        detail.EndDate,
		StrategyLabels: append([]string(nil), detail.StrategyLabels...),
		TaskCount:      int(detail.TaskCount),
		ItemsPreview:   items,
	}
}
|
||||
|
||||
// forumPostDetailFromPB converts a pb post detail (brief plus template) into
// the contracts type; a nil input yields the zero value.
func forumPostDetailFromPB(detail *pb.ForumPostDetail) contracts.ForumPostDetail {
	if detail == nil {
		return contracts.ForumPostDetail{}
	}
	return contracts.ForumPostDetail{
		Post:     forumPostBriefFromPB(detail.Post),
		Template: forumTemplateDetailFromPB(detail.Template),
	}
}
|
||||
|
||||
// forumTagItemsFromPB converts pb tag items into the contracts type, skipping
// nil entries. The result is always non-nil so JSON encodes [] rather than null.
func forumTagItemsFromPB(items []*pb.ForumTagItem) []contracts.ForumTagItem {
	if len(items) == 0 {
		return []contracts.ForumTagItem{}
	}
	result := make([]contracts.ForumTagItem, 0, len(items))
	for _, item := range items {
		if item == nil {
			continue
		}
		result = append(result, contracts.ForumTagItem{
			Tag:       item.Tag,
			PostCount: int(item.PostCount),
		})
	}
	return result
}
|
||||
|
||||
// forumCommentNodeFromPB converts one pb comment node (and, recursively, its
// children) into the contracts tree. A zero ParentCommentId becomes nil and
// an empty DeletedAt becomes nil; a nil node yields the zero value.
func forumCommentNodeFromPB(node *pb.ForumCommentNode) contracts.ForumCommentNode {
	if node == nil {
		return contracts.ForumCommentNode{}
	}
	children := make([]contracts.ForumCommentNode, 0, len(node.Children))
	for _, child := range node.Children {
		children = append(children, forumCommentNodeFromPB(child))
	}
	return contracts.ForumCommentNode{
		CommentID:       node.CommentId,
		PostID:          node.PostId,
		ParentCommentID: uint64PtrFromPositive(node.ParentCommentId),
		Content:         node.Content,
		Status:          node.Status,
		Author:          forumUserFromPB(node.Author),
		CanDelete:       node.CanDelete,
		CreatedAt:       node.CreatedAt,
		DeletedAt:       stringPtrFromNonEmpty(node.DeletedAt),
		Children:        children,
	}
}
|
||||
|
||||
// forumCommentNodesFromPB converts a slice of pb comment nodes into contract
// trees. The result is always non-nil so JSON encodes [] rather than null.
func forumCommentNodesFromPB(items []*pb.ForumCommentNode) []contracts.ForumCommentNode {
	if len(items) == 0 {
		return []contracts.ForumCommentNode{}
	}
	result := make([]contracts.ForumCommentNode, 0, len(items))
	for _, item := range items {
		result = append(result, forumCommentNodeFromPB(item))
	}
	return result
}
|
||||
|
||||
// uint64FromPtr dereferences value, treating nil as zero.
func uint64FromPtr(value *uint64) uint64 {
	if value != nil {
		return *value
	}
	return 0
}
|
||||
|
||||
// uint64PtrFromPositive returns a pointer to value, mapping zero to nil so
// an unset protobuf field round-trips as "absent".
func uint64PtrFromPositive(value uint64) *uint64 {
	if value > 0 {
		v := value
		return &v
	}
	return nil
}
|
||||
|
||||
// stringPtrFromNonEmpty trims value and returns a pointer to the trimmed
// text, or nil when nothing but whitespace remains.
func stringPtrFromNonEmpty(value string) *string {
	if cleaned := strings.TrimSpace(value); cleaned != "" {
		return &cleaned
	}
	return nil
}
|
||||
94
backend/client/taskclassforum/errors.go
Normal file
94
backend/client/taskclassforum/errors.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package taskclassforum
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// responseFromRPCError converts a plan-forum (taskclassforum) zrpc error back
// into a business error the HTTP layer can handle.
//
// Responsibility boundaries:
//  1. It first reads the ErrorInfo detail written by the taskclassforum RPC and
//     recovers the original respond.Response from it;
//  2. Network, timeout, and service-unavailable style failures stay plain
//     errors so the HTTP layer maps them to 500;
//  3. It deliberately does not reuse userauth/errors.go: user/auth still
//     carries historical legacy-code compatibility, while the plan forum only
//     consumes the new ErrorInfo protocol.
func responseFromRPCError(err error) error {
	if err == nil {
		return nil
	}

	st, ok := status.FromError(err)
	if !ok {
		return wrapRPCError(err)
	}
	if resp, ok := responseFromStatusDetails(st); ok {
		return resp
	}

	switch st.Code() {
	case codes.Internal, codes.Unknown, codes.Unavailable, codes.DeadlineExceeded, codes.DataLoss, codes.Unimplemented:
		msg := strings.TrimSpace(st.Message())
		if msg == "" {
			msg = "taskclassforum zrpc service internal error"
		}
		return wrapRPCError(errors.New(msg))
	case codes.NotFound:
		return responseWithFallback(st, respond.UserTaskClassNotFound)
	case codes.PermissionDenied, codes.Unauthenticated:
		return responseWithFallback(st, respond.ErrUnauthorized)
	case codes.InvalidArgument:
		return responseWithFallback(st, respond.MissingParam)
	}

	// Any remaining code is treated as a generic client-side rejection.
	msg := strings.TrimSpace(st.Message())
	if msg == "" {
		msg = "taskclassforum zrpc service rejected request"
	}
	return respond.Response{Status: "400", Info: msg}
}
|
||||
|
||||
func responseFromStatusDetails(st *status.Status) (respond.Response, bool) {
|
||||
if st == nil {
|
||||
return respond.Response{}, false
|
||||
}
|
||||
for _, detail := range st.Details() {
|
||||
info, ok := detail.(*errdetails.ErrorInfo)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
statusValue := strings.TrimSpace(info.Reason)
|
||||
if statusValue == "" {
|
||||
return respond.Response{}, false
|
||||
}
|
||||
message := strings.TrimSpace(st.Message())
|
||||
if message == "" && info.Metadata != nil {
|
||||
message = strings.TrimSpace(info.Metadata["info"])
|
||||
}
|
||||
if message == "" {
|
||||
message = statusValue
|
||||
}
|
||||
return respond.Response{Status: statusValue, Info: message}, true
|
||||
}
|
||||
return respond.Response{}, false
|
||||
}
|
||||
|
||||
// responseWithFallback builds a respond.Response that keeps fallback's status
// code but prefers the RPC status message when it is non-empty.
func responseWithFallback(st *status.Status, fallback respond.Response) respond.Response {
	msg := strings.TrimSpace(st.Message())
	if msg == "" {
		msg = fallback.Info
	}
	return respond.Response{Status: fallback.Status, Info: msg}
}
|
||||
|
||||
func wrapRPCError(err error) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("调用 taskclassforum zrpc 服务失败: %w", err)
|
||||
}
|
||||
80
backend/client/tokenstore/client.go
Normal file
80
backend/client/tokenstore/client.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package tokenstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/tokenstore/rpc/pb"
|
||||
"github.com/zeromicro/go-zero/zrpc"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultEndpoint = "127.0.0.1:9095"
|
||||
defaultTimeout = 2 * time.Second
|
||||
)
|
||||
|
||||
// ClientConfig carries connection settings for the tokenstore zrpc client.
type ClientConfig struct {
	// Endpoints is a static list of service addresses; blank entries are dropped.
	Endpoints []string
	// Target is an alternative zrpc target string (see zrpc.RpcClientConf).
	Target string
	// Timeout bounds each RPC; non-positive values fall back to defaultTimeout.
	Timeout time.Duration
}
|
||||
|
||||
// Client is the gateway-side adapter that gives unified Credit semantics over
// the tokenstore zrpc service.
//
// Responsibility boundaries:
//  1. It only translates between the HTTP gateway and the tokenstore zrpc protocol;
//  2. It never touches the underlying credit_* tables and carries no order/top-up/charge business rules;
//  3. gRPC business errors are unwrapped here into plain errors / respond.Response for the HTTP layer to handle uniformly.
type Client struct {
	rpc pb.TokenStoreServiceClient
}
|
||||
|
||||
// NewClient dials the tokenstore zrpc service described by cfg. A
// non-positive timeout falls back to defaultTimeout; when neither Endpoints
// nor Target is supplied, defaultEndpoint is used. NonBlock means the dial
// does not wait for the server to be reachable.
func NewClient(cfg ClientConfig) (*Client, error) {
	timeout := cfg.Timeout
	if timeout <= 0 {
		timeout = defaultTimeout
	}
	endpoints := normalizeEndpoints(cfg.Endpoints)
	target := strings.TrimSpace(cfg.Target)
	if len(endpoints) == 0 && target == "" {
		endpoints = []string{defaultEndpoint}
	}

	zclient, err := zrpc.NewClient(zrpc.RpcClientConf{
		Endpoints: endpoints,
		Target:    target,
		NonBlock:  true,
		// zrpc expresses the timeout in milliseconds.
		Timeout: int64(timeout / time.Millisecond),
	})
	if err != nil {
		return nil, err
	}
	return &Client{rpc: pb.NewTokenStoreServiceClient(zclient.Conn())}, nil
}
|
||||
|
||||
// ensureReady reports whether the client was properly constructed via
// NewClient before any RPC is attempted.
func (c *Client) ensureReady() error {
	if c == nil || c.rpc == nil {
		return errors.New("tokenstore zrpc client is not initialized")
	}
	return nil
}
|
||||
|
||||
// normalizeEndpoints strips surrounding whitespace from every endpoint and
// discards entries that become empty, keeping the input order.
func normalizeEndpoints(values []string) []string {
	out := make([]string, 0, len(values))
	for _, candidate := range values {
		candidate = strings.TrimSpace(candidate)
		if candidate == "" {
			continue
		}
		out = append(out, candidate)
	}
	return out
}
|
||||
|
||||
// stringPtrFromNonEmpty trims value and returns a pointer to the trimmed
// text, or nil when nothing but whitespace remains.
func stringPtrFromNonEmpty(value string) *string {
	if cleaned := strings.TrimSpace(value); cleaned != "" {
		return &cleaned
	}
	return nil
}
|
||||
388
backend/client/tokenstore/credit.go
Normal file
388
backend/client/tokenstore/credit.go
Normal file
@@ -0,0 +1,388 @@
|
||||
package tokenstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/tokenstore/rpc/pb"
|
||||
creditcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/creditstore"
|
||||
)
|
||||
|
||||
// GetCreditBalanceSnapshot fetches the current credit balance snapshot for
// userID from the tokenstore service.
func (c *Client) GetCreditBalanceSnapshot(ctx context.Context, userID uint64) (*creditcontracts.CreditBalanceSnapshot, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.GetCreditBalanceSnapshot(ctx, &pb.GetCreditBalanceSnapshotRequest{UserId: userID})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("tokenstore zrpc service returned empty get credit balance snapshot response")
	}
	snapshot := creditBalanceSnapshotFromPB(resp.Snapshot)
	return &snapshot, nil
}
|
||||
|
||||
// GetCreditConsumptionDashboard fetches the credit consumption dashboard for
// the acting user over the requested period.
func (c *Client) GetCreditConsumptionDashboard(ctx context.Context, req creditcontracts.GetCreditConsumptionDashboardRequest) (*creditcontracts.CreditConsumptionDashboardView, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.GetCreditConsumptionDashboard(ctx, &pb.GetCreditConsumptionDashboardRequest{
		ActorUserId: req.ActorUserID,
		Period:      req.Period,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("tokenstore zrpc service returned empty get credit consumption dashboard response")
	}
	dashboard := creditConsumptionDashboardFromPB(resp.Dashboard)
	return &dashboard, nil
}
|
||||
|
||||
// ListCreditProducts returns the purchasable credit products visible to
// actorUserID.
func (c *Client) ListCreditProducts(ctx context.Context, actorUserID uint64) ([]creditcontracts.CreditProductView, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.ListCreditProducts(ctx, &pb.ListCreditProductsRequest{ActorUserId: actorUserID})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("tokenstore zrpc service returned empty list credit products response")
	}
	return creditProductsFromPB(resp.Items), nil
}
|
||||
|
||||
// CreateCreditOrder places a purchase order for a credit product. The
// IdempotencyKey lets the service deduplicate retried submissions.
func (c *Client) CreateCreditOrder(ctx context.Context, req creditcontracts.CreateCreditOrderRequest) (*creditcontracts.CreditOrderView, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.CreateCreditOrder(ctx, &pb.CreateCreditOrderRequest{
		ActorUserId:    req.ActorUserID,
		ProductId:      req.ProductID,
		Quantity:       int32(req.Quantity),
		IdempotencyKey: req.IdempotencyKey,
	})
	if err != nil {
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("tokenstore zrpc service returned empty create credit order response")
	}
	order := creditOrderFromPB(resp.Order)
	return &order, nil
}
|
||||
|
||||
func (c *Client) ListCreditOrders(ctx context.Context, req creditcontracts.ListCreditOrdersRequest) ([]creditcontracts.CreditOrderView, creditcontracts.PageResult, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, creditcontracts.PageResult{}, err
|
||||
}
|
||||
resp, err := c.rpc.ListCreditOrders(ctx, &pb.ListCreditOrdersRequest{
|
||||
ActorUserId: req.ActorUserID,
|
||||
Page: int32(req.Page),
|
||||
PageSize: int32(req.PageSize),
|
||||
Status: req.Status,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, creditcontracts.PageResult{}, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, creditcontracts.PageResult{}, errors.New("tokenstore zrpc service returned empty list credit orders response")
|
||||
}
|
||||
return creditOrdersFromPB(resp.Items), creditPageFromPB(resp.Page), nil
|
||||
}
|
||||
|
||||
func (c *Client) GetCreditOrder(ctx context.Context, actorUserID uint64, orderID uint64) (*creditcontracts.CreditOrderView, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.rpc.GetCreditOrder(ctx, &pb.GetCreditOrderRequest{
|
||||
ActorUserId: actorUserID,
|
||||
OrderId: orderID,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, errors.New("tokenstore zrpc service returned empty get credit order response")
|
||||
}
|
||||
order := creditOrderFromPB(resp.Order)
|
||||
return &order, nil
|
||||
}
|
||||
|
||||
func (c *Client) MockPaidCreditOrder(ctx context.Context, req creditcontracts.MockPaidCreditOrderRequest) (*creditcontracts.CreditOrderView, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.rpc.MockPaidCreditOrder(ctx, &pb.MockPaidCreditOrderRequest{
|
||||
ActorUserId: req.ActorUserID,
|
||||
OrderId: req.OrderID,
|
||||
MockChannel: req.MockChannel,
|
||||
IdempotencyKey: req.IdempotencyKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, errors.New("tokenstore zrpc service returned empty mock paid credit order response")
|
||||
}
|
||||
order := creditOrderFromPB(resp.Order)
|
||||
return &order, nil
|
||||
}
|
||||
|
||||
func (c *Client) ListCreditTransactions(ctx context.Context, req creditcontracts.ListCreditTransactionsRequest) ([]creditcontracts.CreditTransactionView, creditcontracts.PageResult, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, creditcontracts.PageResult{}, err
|
||||
}
|
||||
resp, err := c.rpc.ListCreditTransactions(ctx, &pb.ListCreditTransactionsRequest{
|
||||
ActorUserId: req.ActorUserID,
|
||||
Page: int32(req.Page),
|
||||
PageSize: int32(req.PageSize),
|
||||
Source: req.Source,
|
||||
Direction: req.Direction,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, creditcontracts.PageResult{}, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, creditcontracts.PageResult{}, errors.New("tokenstore zrpc service returned empty list credit transactions response")
|
||||
}
|
||||
return creditTransactionsFromPB(resp.Items), creditPageFromPB(resp.Page), nil
|
||||
}
|
||||
|
||||
func (c *Client) ListCreditPriceRules(ctx context.Context, req creditcontracts.ListCreditPriceRulesRequest) ([]creditcontracts.CreditPriceRuleView, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.rpc.ListCreditPriceRules(ctx, &pb.ListCreditPriceRulesRequest{
|
||||
Scene: req.Scene,
|
||||
ProviderName: req.ProviderName,
|
||||
ModelName: req.ModelName,
|
||||
Status: req.Status,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, errors.New("tokenstore zrpc service returned empty list credit price rules response")
|
||||
}
|
||||
return creditPriceRulesFromPB(resp.Items), nil
|
||||
}
|
||||
|
||||
func (c *Client) ListCreditRewardRules(ctx context.Context, req creditcontracts.ListCreditRewardRulesRequest) ([]creditcontracts.CreditRewardRuleView, error) {
|
||||
if err := c.ensureReady(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp, err := c.rpc.ListCreditRewardRules(ctx, &pb.ListCreditRewardRulesRequest{
|
||||
Source: req.Source,
|
||||
Status: req.Status,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, responseFromRPCError(err)
|
||||
}
|
||||
if resp == nil {
|
||||
return nil, errors.New("tokenstore zrpc service returned empty list credit reward rules response")
|
||||
}
|
||||
return creditRewardRulesFromPB(resp.Items), nil
|
||||
}
|
||||
|
||||
// creditBalanceSnapshotFromPB maps a protobuf balance snapshot onto the
// contract-layer struct, field for field. A nil input yields the zero value
// so callers never need a separate nil guard.
func creditBalanceSnapshotFromPB(snapshot *pb.CreditBalanceSnapshotView) creditcontracts.CreditBalanceSnapshot {
	if snapshot == nil {
		return creditcontracts.CreditBalanceSnapshot{}
	}
	return creditcontracts.CreditBalanceSnapshot{
		UserID:         snapshot.UserId,
		Balance:        snapshot.Balance,
		TotalRecharged: snapshot.TotalRecharged,
		TotalRewarded:  snapshot.TotalRewarded,
		TotalConsumed:  snapshot.TotalConsumed,
		IsBlocked:      snapshot.IsBlocked,
		SnapshotSource: snapshot.SnapshotSource,
		UpdatedAt:      snapshot.UpdatedAt,
	}
}
|
||||
|
||||
// creditConsumptionDashboardFromPB maps a protobuf dashboard view onto the
// contract struct. Nil input maps to the zero value.
func creditConsumptionDashboardFromPB(view *pb.CreditConsumptionDashboardView) creditcontracts.CreditConsumptionDashboardView {
	if view == nil {
		return creditcontracts.CreditConsumptionDashboardView{}
	}
	return creditcontracts.CreditConsumptionDashboardView{
		Period:         view.Period,
		CreditConsumed: view.CreditConsumed,
		TokenConsumed:  view.TokenConsumed,
	}
}
|
||||
|
||||
// creditProductFromPB maps a protobuf credit product onto the contract view.
// Nil input maps to the zero value. SortOrder is narrowed from the wire's
// int32 to a native int for the contract layer.
func creditProductFromPB(product *pb.CreditProductView) creditcontracts.CreditProductView {
	if product == nil {
		return creditcontracts.CreditProductView{}
	}
	return creditcontracts.CreditProductView{
		ProductID:         product.ProductId,
		Name:              product.Name,
		Description:       product.Description,
		CreditAmount:      product.CreditAmount,
		PriceCent:         product.PriceCent,
		OriginalPriceCent: product.OriginalPriceCent,
		PriceText:         product.PriceText,
		Currency:          product.Currency,
		Badge:             product.Badge,
		Status:            product.Status,
		SortOrder:         int(product.SortOrder),
	}
}
|
||||
|
||||
func creditProductsFromPB(items []*pb.CreditProductView) []creditcontracts.CreditProductView {
|
||||
if len(items) == 0 {
|
||||
return []creditcontracts.CreditProductView{}
|
||||
}
|
||||
result := make([]creditcontracts.CreditProductView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, creditProductFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// creditOrderFromPB maps a protobuf credit order onto the contract view.
// Nil input maps to the zero value. PaidAt/CreditedAt are carried as empty
// strings on the wire and become nil pointers here via stringPtrFromNonEmpty,
// marking "not yet happened".
func creditOrderFromPB(order *pb.CreditOrderView) creditcontracts.CreditOrderView {
	if order == nil {
		return creditcontracts.CreditOrderView{}
	}
	return creditcontracts.CreditOrderView{
		OrderID:         order.OrderId,
		OrderNo:         order.OrderNo,
		Status:          order.Status,
		ProductSnapshot: order.ProductSnapshot,
		ProductName:     order.ProductName,
		Quantity:        int(order.Quantity),
		CreditAmount:    order.CreditAmount,
		AmountCent:      order.AmountCent,
		PriceText:       order.PriceText,
		Currency:        order.Currency,
		PaymentMode:     order.PaymentMode,
		CreatedAt:       order.CreatedAt,
		PaidAt:          stringPtrFromNonEmpty(order.PaidAt),
		CreditedAt:      stringPtrFromNonEmpty(order.CreditedAt),
	}
}
|
||||
|
||||
func creditOrdersFromPB(items []*pb.CreditOrderView) []creditcontracts.CreditOrderView {
|
||||
if len(items) == 0 {
|
||||
return []creditcontracts.CreditOrderView{}
|
||||
}
|
||||
result := make([]creditcontracts.CreditOrderView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, creditOrderFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// creditTransactionFromPB maps a protobuf ledger entry onto the contract view.
// Nil input maps to the zero value. OrderId == 0 on the wire means "no
// associated order", so it is surfaced as a nil pointer instead of a zero ID.
func creditTransactionFromPB(item *pb.CreditTransactionView) creditcontracts.CreditTransactionView {
	if item == nil {
		return creditcontracts.CreditTransactionView{}
	}
	var orderID *uint64
	if item.OrderId > 0 {
		// Copy into a local so the pointer does not alias the pb message field.
		value := item.OrderId
		orderID = &value
	}
	return creditcontracts.CreditTransactionView{
		TransactionID: item.TransactionId,
		EventID:       item.EventId,
		Source:        item.Source,
		SourceLabel:   item.SourceLabel,
		Direction:     item.Direction,
		Amount:        item.Amount,
		BalanceAfter:  item.BalanceAfter,
		Status:        item.Status,
		Description:   item.Description,
		MetadataJSON:  item.MetadataJson,
		CreatedAt:     item.CreatedAt,
		OrderID:       orderID,
	}
}
|
||||
|
||||
func creditTransactionsFromPB(items []*pb.CreditTransactionView) []creditcontracts.CreditTransactionView {
|
||||
if len(items) == 0 {
|
||||
return []creditcontracts.CreditTransactionView{}
|
||||
}
|
||||
result := make([]creditcontracts.CreditTransactionView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, creditTransactionFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// creditPriceRuleFromPB maps a protobuf pricing rule onto the contract view.
// Nil input maps to the zero value. The *Micros fields are per-token prices in
// micro-units as carried on the wire — confirm the exact unit against the pb
// schema. Priority is narrowed from int32 to int.
func creditPriceRuleFromPB(item *pb.CreditPriceRuleView) creditcontracts.CreditPriceRuleView {
	if item == nil {
		return creditcontracts.CreditPriceRuleView{}
	}
	return creditcontracts.CreditPriceRuleView{
		RuleID:                     item.RuleId,
		Scene:                      item.Scene,
		ProviderName:               item.ProviderName,
		ModelName:                  item.ModelName,
		InputPriceMicros:           item.InputPriceMicros,
		OutputPriceMicros:          item.OutputPriceMicros,
		CachedPriceMicros:          item.CachedPriceMicros,
		ReasoningPriceMicros:       item.ReasoningPriceMicros,
		CreditPerYuan:              item.CreditPerYuan,
		ProfitRateBps:              item.ProfitRateBps,
		ChargeInputPriceMicros:     item.ChargeInputPriceMicros,
		ChargeOutputPriceMicros:    item.ChargeOutputPriceMicros,
		ChargeCachedPriceMicros:    item.ChargeCachedPriceMicros,
		ChargeReasoningPriceMicros: item.ChargeReasoningPriceMicros,
		Status:                     item.Status,
		Priority:                   int(item.Priority),
		Description:                item.Description,
	}
}
|
||||
|
||||
func creditPriceRulesFromPB(items []*pb.CreditPriceRuleView) []creditcontracts.CreditPriceRuleView {
|
||||
if len(items) == 0 {
|
||||
return []creditcontracts.CreditPriceRuleView{}
|
||||
}
|
||||
result := make([]creditcontracts.CreditPriceRuleView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, creditPriceRuleFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// creditRewardRuleFromPB maps a protobuf reward rule onto the contract view.
// Nil input maps to the zero value.
func creditRewardRuleFromPB(item *pb.CreditRewardRuleView) creditcontracts.CreditRewardRuleView {
	if item == nil {
		return creditcontracts.CreditRewardRuleView{}
	}
	return creditcontracts.CreditRewardRuleView{
		RuleID:      item.RuleId,
		Source:      item.Source,
		Name:        item.Name,
		Amount:      item.Amount,
		Status:      item.Status,
		Description: item.Description,
	}
}
|
||||
|
||||
func creditRewardRulesFromPB(items []*pb.CreditRewardRuleView) []creditcontracts.CreditRewardRuleView {
|
||||
if len(items) == 0 {
|
||||
return []creditcontracts.CreditRewardRuleView{}
|
||||
}
|
||||
result := make([]creditcontracts.CreditRewardRuleView, 0, len(items))
|
||||
for _, item := range items {
|
||||
result = append(result, creditRewardRuleFromPB(item))
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// creditPageFromPB maps protobuf paging metadata onto the contract PageResult.
// Nil input maps to the zero value. Counters are narrowed from the wire's
// int32/int64 widths to native int.
func creditPageFromPB(page *pb.PageResponse) creditcontracts.PageResult {
	if page == nil {
		return creditcontracts.PageResult{}
	}
	return creditcontracts.PageResult{
		Page:     int(page.Page),
		PageSize: int(page.PageSize),
		Total:    int(page.Total),
		HasMore:  page.HasMore,
	}
}
|
||||
92
backend/client/tokenstore/errors.go
Normal file
92
backend/client/tokenstore/errors.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package tokenstore
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// responseFromRPCError 把 token-store zrpc 错误恢复成 HTTP 层可处理的业务错误。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 优先读取 token-store RPC 写入的 ErrorInfo,恢复 respond.Response;
|
||||
// 2. 对网络、超时、服务不可用等非业务错误保留为普通 error,让 HTTP 层按 500 处理;
|
||||
// 3. 不在这里拼装 HTTP 响应体,handler 仍然统一走 respond.DealWithError。
|
||||
// responseFromRPCError restores a token-store zrpc error into a business error
// that the HTTP layer can handle.
//
// Responsibilities:
//  1. Prefer the ErrorInfo detail written by the token-store RPC side and
//     rebuild the original respond.Response from it;
//  2. Keep network/timeout/unavailable-style failures as plain errors so the
//     HTTP layer handles them as 500s;
//  3. Never assemble an HTTP response body here — handlers still go through
//     respond.DealWithError uniformly.
func responseFromRPCError(err error) error {
	if err == nil {
		return nil
	}

	st, ok := status.FromError(err)
	if !ok {
		// Not a gRPC status at all (e.g. a raw transport failure): wrap and
		// bubble up as a plain error.
		return wrapRPCError(err)
	}
	if resp, ok := responseFromStatusDetails(st); ok {
		return resp
	}

	switch st.Code() {
	case codes.Internal, codes.Unknown, codes.Unavailable, codes.DeadlineExceeded, codes.DataLoss, codes.Unimplemented:
		// Infrastructure-level failures stay plain errors (HTTP layer -> 500).
		msg := strings.TrimSpace(st.Message())
		if msg == "" {
			msg = "tokenstore zrpc service internal error"
		}
		return wrapRPCError(errors.New(msg))
	case codes.PermissionDenied, codes.Unauthenticated:
		return responseWithFallback(st, respond.ErrUnauthorized)
	case codes.InvalidArgument:
		return responseWithFallback(st, respond.MissingParam)
	}

	// Any other rejection code becomes a generic "400" business response.
	msg := strings.TrimSpace(st.Message())
	if msg == "" {
		msg = "tokenstore zrpc service rejected request"
	}
	return respond.Response{Status: "400", Info: msg}
}
|
||||
|
||||
// responseFromStatusDetails tries to rebuild a respond.Response from the
// ErrorInfo detail attached to a gRPC status. The boolean reports whether a
// usable detail was found.
//
// Convention (mirroring what the RPC server writes, per the package header):
//   - ErrorInfo.Reason      -> Response.Status (business status code)
//   - status message        -> Response.Info, falling back first to the
//     "info" metadata entry and finally to the status code itself.
func responseFromStatusDetails(st *status.Status) (respond.Response, bool) {
	if st == nil {
		return respond.Response{}, false
	}
	for _, detail := range st.Details() {
		info, ok := detail.(*errdetails.ErrorInfo)
		if !ok {
			continue
		}

		statusValue := strings.TrimSpace(info.Reason)
		if statusValue == "" {
			// An ErrorInfo without a reason is unusable; treat the whole
			// status as carrying no recoverable business response.
			return respond.Response{}, false
		}
		message := strings.TrimSpace(st.Message())
		if message == "" && info.Metadata != nil {
			message = strings.TrimSpace(info.Metadata["info"])
		}
		if message == "" {
			message = statusValue
		}
		return respond.Response{Status: statusValue, Info: message}, true
	}
	return respond.Response{}, false
}
|
||||
|
||||
func responseWithFallback(st *status.Status, fallback respond.Response) respond.Response {
|
||||
msg := strings.TrimSpace(st.Message())
|
||||
if msg == "" {
|
||||
msg = fallback.Info
|
||||
}
|
||||
return respond.Response{Status: fallback.Status, Info: msg}
|
||||
}
|
||||
|
||||
// wrapRPCError annotates a transport-level failure with the tokenstore call
// context; a nil error stays nil so success paths are unaffected.
func wrapRPCError(err error) error {
	if err != nil {
		return fmt.Errorf("调用 tokenstore zrpc 服务失败: %w", err)
	}
	return nil
}
|
||||
@@ -138,50 +138,6 @@ func (c *Client) ValidateAccessToken(ctx context.Context, accessToken string) (*
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CheckTokenQuota asks the RPC service whether the user may still consume
// tokens, returning the verdict together with the current limit/usage
// snapshot and the last reset time.
//
// NOTE(review): the empty-response error says "userauth" — confirm this
// wrapper lives in the userauth client package; if it is the tokenstore
// client, the service name in the message is stale.
func (c *Client) CheckTokenQuota(ctx context.Context, userID int) (*contracts.CheckTokenQuotaResponse, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.CheckTokenQuota(ctx, &pb.CheckTokenQuotaRequest{
		UserId: int64(userID),
	})
	if err != nil {
		// Translate gRPC status errors back into business-level errors.
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("userauth zrpc service returned empty quota response")
	}
	return &contracts.CheckTokenQuotaResponse{
		Allowed:     resp.Allowed,
		TokenLimit:  int(resp.TokenLimit),
		TokenUsage:  int(resp.TokenUsage),
		LastResetAt: timeFromUnixNano(resp.LastResetAtUnixNano),
	}, nil
}
|
||||
|
||||
// AdjustTokenUsage applies a token usage delta for the user and returns the
// refreshed quota snapshot. EventID is forwarded to the server — presumably
// for idempotent de-duplication of replayed events; confirm with the RPC
// contract.
//
// NOTE(review): the empty-response error says "userauth" — verify the service
// name matches the package this client actually targets.
func (c *Client) AdjustTokenUsage(ctx context.Context, req contracts.AdjustTokenUsageRequest) (*contracts.CheckTokenQuotaResponse, error) {
	if err := c.ensureReady(); err != nil {
		return nil, err
	}
	resp, err := c.rpc.AdjustTokenUsage(ctx, &pb.AdjustTokenUsageRequest{
		EventId:    req.EventID,
		UserId:     int64(req.UserID),
		TokenDelta: int64(req.TokenDelta),
	})
	if err != nil {
		// Translate gRPC status errors back into business-level errors.
		return nil, responseFromRPCError(err)
	}
	if resp == nil {
		return nil, errors.New("userauth zrpc service returned empty adjust response")
	}
	return &contracts.CheckTokenQuotaResponse{
		Allowed:     resp.Allowed,
		TokenLimit:  int(resp.TokenLimit),
		TokenUsage:  int(resp.TokenUsage),
		LastResetAt: timeFromUnixNano(resp.LastResetAtUnixNano),
	}, nil
}
|
||||
|
||||
func (c *Client) ensureReady() error {
|
||||
if c == nil || c.rpc == nil {
|
||||
return errors.New("userauth zrpc client is not initialized")
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"google.golang.org/genproto/googleapis/rpc/errdetails"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
@@ -7,14 +7,13 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeschedulerdao "github.com/LoveLosita/smartflow/backend/services/active_scheduler/dao"
|
||||
activeschedulerrpc "github.com/LoveLosita/smartflow/backend/services/active_scheduler/rpc"
|
||||
activeschedulersv "github.com/LoveLosita/smartflow/backend/services/active_scheduler/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -31,16 +30,17 @@ func main() {
|
||||
log.Fatalf("failed to connect active-scheduler database: %v", err)
|
||||
}
|
||||
|
||||
aiHub, err := inits.InitEino()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize active-scheduler Eino runtime: %v", err)
|
||||
}
|
||||
llmService := llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
llmService, err := llmclient.NewService(llmclient.ServiceConfig{
|
||||
ClientConfig: llmclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
|
||||
Target: viper.GetString("llm.rpc.target"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
},
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize active-scheduler llm client: %v", err)
|
||||
}
|
||||
|
||||
svc, err := activeschedulersv.New(db, llmService, activeschedulersv.Options{
|
||||
JobScanEvery: viper.GetDuration("activeScheduler.jobScanEvery"),
|
||||
|
||||
@@ -7,8 +7,6 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
@@ -18,6 +16,8 @@ import (
|
||||
activeTrigger "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
agentstream "github.com/LoveLosita/smartflow/backend/services/agent/stream"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
func buildActiveSchedulePreviewConfirmService(activeDAO *rootdao.ActiveScheduleDAO, dryRun *activesvc.DryRunService, scheduleApplyAdapter interface {
|
||||
|
||||
@@ -7,8 +7,8 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
agentrpc "github.com/LoveLosita/smartflow/backend/services/agent/rpc"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -5,22 +5,14 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
|
||||
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
rootsvc "github.com/LoveLosita/smartflow/backend/service"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
|
||||
memoryclient "github.com/LoveLosita/smartflow/backend/client/memory"
|
||||
scheduleclient "github.com/LoveLosita/smartflow/backend/client/schedule"
|
||||
taskclient "github.com/LoveLosita/smartflow/backend/client/task"
|
||||
taskclassclient "github.com/LoveLosita/smartflow/backend/client/taskclass"
|
||||
userauthclient "github.com/LoveLosita/smartflow/backend/client/userauth"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
activegraph "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/graph"
|
||||
@@ -35,6 +27,18 @@ import (
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
mysqlinfra "github.com/LoveLosita/smartflow/backend/shared/infra/mysql"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
@@ -48,7 +52,7 @@ type agentRuntime struct {
|
||||
repoManager *rootdao.RepoManager
|
||||
agentRepo *rootdao.AgentDAO
|
||||
cacheRepo *rootdao.CacheDAO
|
||||
userAuthClient *gatewayuserauth.Client
|
||||
userAuthClient *userauthclient.Client
|
||||
service *agentsv.AgentService
|
||||
workersStarted bool
|
||||
}
|
||||
@@ -59,7 +63,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
return nil, fmt.Errorf("connect agent database failed: %w", err)
|
||||
}
|
||||
|
||||
redisClient, err := inits.OpenRedisFromConfig()
|
||||
redisClient, err := redisinfra.OpenRedisFromConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("connect agent redis failed: %w", err)
|
||||
}
|
||||
@@ -69,7 +73,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
}
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err = db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err = db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
return fail(fmt.Errorf("initialize agent cache deleter failed: %w", err))
|
||||
}
|
||||
|
||||
@@ -94,8 +98,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
manager := rootdao.NewManager(db)
|
||||
agentRepo := rootdao.NewAgentDAO(db)
|
||||
taskRepo := rootdao.NewTaskDAO(db)
|
||||
taskServiceRepo := taskdao.NewTaskDAO(db)
|
||||
taskClassRepo := rootdao.NewTaskClassDAO(db)
|
||||
scheduleRepo := rootdao.NewScheduleDAO(db)
|
||||
scheduleServiceRepo := scheduledao.NewScheduleDAO(db)
|
||||
agentCacheRepo := rootdao.NewAgentCache(redisClient)
|
||||
outboxRepo := outboxinfra.NewRepository(db)
|
||||
|
||||
@@ -110,9 +115,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
eventPublisher := buildAgentOutboxPublisher(outboxRepo)
|
||||
taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
|
||||
|
||||
var userAuthClient *gatewayuserauth.Client
|
||||
var userAuthClient *userauthclient.Client
|
||||
if eventBus != nil {
|
||||
userAuthClient, err = gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
|
||||
userAuthClient, err = userauthclient.NewClient(userauthclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
|
||||
Target: viper.GetString("userauth.rpc.target"),
|
||||
Timeout: viper.GetDuration("userauth.rpc.timeout"),
|
||||
@@ -122,7 +127,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
}
|
||||
}
|
||||
|
||||
taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
|
||||
taskClient, err := taskclient.NewClient(taskclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
|
||||
Target: viper.GetString("task.rpc.target"),
|
||||
Timeout: viper.GetDuration("task.rpc.timeout"),
|
||||
@@ -130,7 +135,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize task zrpc client failed: %w", err))
|
||||
}
|
||||
taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
|
||||
taskClassClient, err := taskclassclient.NewClient(taskclassclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
|
||||
Target: viper.GetString("taskClass.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskClass.rpc.timeout"),
|
||||
@@ -138,7 +143,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize task-class zrpc client failed: %w", err))
|
||||
}
|
||||
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||
scheduleClient, err := scheduleclient.NewClient(scheduleclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
@@ -146,7 +151,7 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
if err != nil {
|
||||
return fail(fmt.Errorf("initialize schedule zrpc client failed: %w", err))
|
||||
}
|
||||
memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
|
||||
memoryClient, err := memoryclient.NewClient(memoryclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
|
||||
Target: viper.GetString("memory.rpc.target"),
|
||||
Timeout: viper.GetDuration("memory.rpc.timeout"),
|
||||
@@ -155,9 +160,9 @@ func buildAgentRuntime(ctx context.Context) (*agentRuntime, error) {
|
||||
return fail(fmt.Errorf("initialize memory zrpc client failed: %w", err))
|
||||
}
|
||||
|
||||
taskService := rootsvc.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskService := tasksv.NewTaskService(taskServiceRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskService.SetActiveScheduleDAO(manager.ActiveSchedule)
|
||||
scheduleService := rootsvc.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
|
||||
scheduleService := schedulesv.NewScheduleService(scheduleServiceRepo, taskClassRepo, manager, cacheRepo)
|
||||
agentService := agentsv.NewAgentService(
|
||||
llmService,
|
||||
agentRepo,
|
||||
@@ -248,10 +253,6 @@ func (r *agentRuntime) startWorkers(ctx context.Context) error {
|
||||
log.Println("Agent outbox consumer is disabled")
|
||||
return nil
|
||||
}
|
||||
if r.userAuthClient == nil {
|
||||
return fmt.Errorf("agent outbox consumer requires userauth zrpc client")
|
||||
}
|
||||
|
||||
// 1. 先登记 agent 自己消费的 handler,同时补齐 memory.extract.requested 的服务路由。
|
||||
// 2. 这里明确只接 agent 边界;memory 消费仍归 cmd/memory,task 事件仍是 publish-only 写入 task outbox。
|
||||
// 3. 注册完成后再启动总线,避免服务一起来就抢先消费到尚未挂 handler 的消息。
|
||||
@@ -262,7 +263,6 @@ func (r *agentRuntime) startWorkers(ctx context.Context) error {
|
||||
r.agentRepo,
|
||||
r.cacheRepo,
|
||||
nil,
|
||||
r.userAuthClient,
|
||||
); err != nil {
|
||||
return fmt.Errorf("register agent outbox handlers failed: %w", err)
|
||||
}
|
||||
@@ -286,7 +286,7 @@ func (r *agentRuntime) close() {
|
||||
}
|
||||
|
||||
func openAgentDBFromConfig() (*gorm.DB, error) {
|
||||
db, err := inits.OpenDBFromConfig()
|
||||
db, err := mysqlinfra.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -364,16 +364,14 @@ func ensureAgentRuntimeDependencyTables(db *gorm.DB) error {
|
||||
}
|
||||
|
||||
func buildAgentLLMService() (*llmservice.Service, error) {
|
||||
aiHub, err := inits.InitEino()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
return llmclient.NewService(llmclient.ServiceConfig{
|
||||
ClientConfig: llmclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
|
||||
Target: viper.GetString("llm.rpc.target"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
},
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
}), nil
|
||||
})
|
||||
}
|
||||
|
||||
func buildAgentRAGService(ctx context.Context) (*ragservice.Service, error) {
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
package main
|
||||
|
||||
import "github.com/LoveLosita/smartflow/backend/cmd"
|
||||
|
||||
func main() {
|
||||
cmd.StartAll()
|
||||
}
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
|
||||
coursedao "github.com/LoveLosita/smartflow/backend/services/course/dao"
|
||||
courserpc "github.com/LoveLosita/smartflow/backend/services/course/rpc"
|
||||
coursesv "github.com/LoveLosita/smartflow/backend/services/course/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -33,15 +33,21 @@ func main() {
|
||||
// 2. scheduleRepo 用于复用既有冲突检查,后续若切 schedule RPC bridge 再替换这里。
|
||||
courseRepo := coursedao.NewCourseDAO(db)
|
||||
scheduleRepo := rootdao.NewScheduleDAO(db)
|
||||
courseImageClient := llmservice.NewArkResponsesClient(
|
||||
os.Getenv("ARK_API_KEY"),
|
||||
viper.GetString("agent.baseURL"),
|
||||
viper.GetString("courseImport.visionModel"),
|
||||
)
|
||||
llmService, err := llmclient.NewService(llmclient.ServiceConfig{
|
||||
ClientConfig: llmclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
|
||||
Target: viper.GetString("llm.rpc.target"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
},
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize course llm client: %v", err)
|
||||
}
|
||||
svc := coursesv.NewCourseService(
|
||||
courseRepo,
|
||||
scheduleRepo,
|
||||
courseImageClient,
|
||||
llmService.CourseImageResponsesClient(),
|
||||
coursesv.NewCourseImageParseConfig(
|
||||
viper.GetInt64("courseImport.maxImageBytes"),
|
||||
viper.GetInt("courseImport.maxTokens"),
|
||||
|
||||
@@ -1,13 +1,12 @@
|
||||
package inits
|
||||
package coreinit
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/spf13/viper"
|
||||
"gorm.io/driver/mysql"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
mysqlinfra "github.com/LoveLosita/smartflow/backend/shared/infra/mysql"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
@@ -102,22 +101,7 @@ func backfillAutoMigrateData(db *gorm.DB) error {
|
||||
// 2. 不负责选择要迁移哪些模型,迁移入口必须由具体服务显式调用;
|
||||
// 3. 调用方负责决定这是单体残留域、user/auth 还是后续新服务的连接。
|
||||
func OpenDBFromConfig() (*gorm.DB, error) {
|
||||
host := viper.GetString("database.host")
|
||||
port := viper.GetString("database.port")
|
||||
user := viper.GetString("database.user")
|
||||
password := viper.GetString("database.password")
|
||||
dbname := viper.GetString("database.dbname")
|
||||
|
||||
dsn := fmt.Sprintf(
|
||||
"%s:%s@tcp(%s:%s)/%s?charset=utf8mb4&parseTime=True&loc=Local",
|
||||
user, password, host, port, dbname,
|
||||
)
|
||||
|
||||
db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
return mysqlinfra.OpenDBFromConfig()
|
||||
}
|
||||
|
||||
// AutoMigrateCoreStorage 执行当前单体残留域拥有的 schema 初始化。
|
||||
@@ -1,11 +1,10 @@
|
||||
package inits
|
||||
package coreinit
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
// OpenRedisFromConfig 只创建 Redis client 并做连通性校验。
|
||||
@@ -15,15 +14,7 @@ import (
|
||||
// 2. 不承载 user/auth 黑名单、token 额度等业务语义,那些语义已经收进 userauth 服务;
|
||||
// 3. 返回 error 给服务入口统一处理,避免基础设施包直接 log.Fatal 终止进程。
|
||||
func OpenRedisFromConfig() (*redis.Client, error) {
|
||||
rdb := redis.NewClient(&redis.Options{
|
||||
Addr: viper.GetString("redis.host") + ":" + viper.GetString("redis.port"),
|
||||
Password: viper.GetString("redis.password"),
|
||||
DB: 0,
|
||||
})
|
||||
if _, err := rdb.Ping(context.Background()).Result(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return rdb, nil
|
||||
return redisinfra.OpenRedisFromConfig()
|
||||
}
|
||||
|
||||
// InitCoreRedis 初始化当前单体残留域使用的 Redis 连接。
|
||||
162
backend/cmd/llm/main.go
Normal file
162
backend/cmd/llm/main.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync"
|
||||
"syscall"
|
||||
|
||||
tokenstoreclient "github.com/LoveLosita/smartflow/backend/client/tokenstore"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
llmdao "github.com/LoveLosita/smartflow/backend/services/llm/dao"
|
||||
llmrpc "github.com/LoveLosita/smartflow/backend/services/llm/rpc"
|
||||
creditcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/creditstore"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
einoinfra "github.com/LoveLosita/smartflow/backend/shared/infra/eino"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := bootstrap.LoadConfig(); err != nil {
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
db, err := llmdao.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to connect llm database: %v", err)
|
||||
}
|
||||
|
||||
redisClient, err := redisinfra.OpenRedisFromConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to connect llm redis: %v", err)
|
||||
}
|
||||
defer redisClient.Close()
|
||||
|
||||
aiHub, err := einoinfra.InitEino()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize llm Eino runtime: %v", err)
|
||||
}
|
||||
|
||||
legacyService := llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
|
||||
balanceSnapshotProvider := &tokenStoreSnapshotProvider{
|
||||
cfg: tokenstoreclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("tokenstore.rpc.endpoints"),
|
||||
Target: viper.GetString("tokenstore.rpc.target"),
|
||||
Timeout: viper.GetDuration("tokenstore.rpc.timeout"),
|
||||
},
|
||||
}
|
||||
|
||||
outboxRepo := outboxinfra.NewRepository(db)
|
||||
priceRuleDAO := llmdao.NewPriceRuleDAO(db)
|
||||
dispatchEngine, err := buildLLMOutboxDispatchEngine(outboxRepo)
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize llm outbox dispatch engine: %v", err)
|
||||
}
|
||||
if dispatchEngine != nil {
|
||||
dispatchEngine.StartDispatch(ctx)
|
||||
defer dispatchEngine.Close()
|
||||
log.Println("llm outbox dispatch started")
|
||||
} else {
|
||||
log.Println("llm outbox dispatch is disabled")
|
||||
}
|
||||
|
||||
runtimeService, err := llmservice.NewRuntimeService(llmservice.RuntimeServiceOptions{
|
||||
LegacyService: legacyService,
|
||||
CacheDAO: llmdao.NewCacheDAO(redisClient),
|
||||
PriceRuleDAO: priceRuleDAO,
|
||||
SnapshotProvider: balanceSnapshotProvider,
|
||||
OutboxRepo: outboxRepo,
|
||||
OutboxMaxRetry: kafkabus.LoadConfig().MaxRetry,
|
||||
ProviderName: viper.GetString("llm.providerName"),
|
||||
LiteModelName: viper.GetString("agent.liteModel"),
|
||||
ProModelName: viper.GetString("agent.proModel"),
|
||||
MaxModelName: viper.GetString("agent.maxModel"),
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize llm runtime service: %v", err)
|
||||
}
|
||||
|
||||
server, listenOn, err := llmrpc.NewServer(llmrpc.ServerOptions{
|
||||
ListenOn: viper.GetString("llm.rpc.listenOn"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
Service: runtimeService,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build llm zrpc server: %v", err)
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
go func() {
|
||||
log.Printf("llm zrpc service starting on %s", listenOn)
|
||||
server.Start()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
log.Println("llm service stopping")
|
||||
}
|
||||
|
||||
func buildLLMOutboxDispatchEngine(outboxRepo *outboxinfra.Repository) (*outboxinfra.Engine, error) {
|
||||
kafkaCfg := kafkabus.LoadConfig()
|
||||
if !kafkaCfg.Enabled || outboxRepo == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// 1. LLM 进程这里只启动“LLM 自己 outbox 的 dispatch”,不应复用 kafka.LoadConfig() 里的全局默认 topic/group。
|
||||
// 2. 全局默认值当前仍兼容指向 agent outbox;若只改 ServiceName 不同步改 Topic/GroupID,llm.exe 会误入 agent consumer group。
|
||||
// 3. 因此这里显式绑定 llm 服务目录,确保 dispatch engine 只触达 llm_outbox_messages / smartflow.llm.outbox。
|
||||
route, _ := outboxinfra.ResolveServiceRoute(outboxinfra.ServiceLLM)
|
||||
kafkaCfg.ServiceName = route.ServiceName
|
||||
kafkaCfg.Topic = route.Topic
|
||||
kafkaCfg.GroupID = route.GroupID
|
||||
return outboxinfra.NewEngine(outboxRepo.WithRoute(route), kafkaCfg)
|
||||
}
|
||||
|
||||
type tokenStoreSnapshotProvider struct {
|
||||
cfg tokenstoreclient.ClientConfig
|
||||
|
||||
mu sync.Mutex
|
||||
client *tokenstoreclient.Client
|
||||
}
|
||||
|
||||
func (p *tokenStoreSnapshotProvider) GetCreditBalanceSnapshot(ctx context.Context, userID uint64) (*creditcontracts.CreditBalanceSnapshot, error) {
|
||||
client, err := p.ensureClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return client.GetCreditBalanceSnapshot(ctx, userID)
|
||||
}
|
||||
|
||||
func (p *tokenStoreSnapshotProvider) ensureClient() (*tokenstoreclient.Client, error) {
|
||||
if p == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if p.client != nil {
|
||||
return p.client, nil
|
||||
}
|
||||
|
||||
client, err := tokenstoreclient.NewClient(p.cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.client = client
|
||||
return p.client, nil
|
||||
}
|
||||
@@ -8,10 +8,7 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
memorymodule "github.com/LoveLosita/smartflow/backend/services/memory"
|
||||
memorydao "github.com/LoveLosita/smartflow/backend/services/memory/dao"
|
||||
@@ -20,6 +17,9 @@ import (
|
||||
memorysv "github.com/LoveLosita/smartflow/backend/services/memory/sv"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -96,20 +96,21 @@ func main() {
|
||||
//
|
||||
// 说明:
|
||||
// 1. CP1 先复用既有 llm-service canonical 入口,不在 memory 服务里重建模型调用封装;
|
||||
// 2. 当前启动入口与 cmd/start.go / cmd/active-scheduler 都需要 Eino 初始化,后续若出现第三处重复装配,应抽公共 bootstrap;
|
||||
// 2. 现在统一改走独立 llm zrpc client,memory 进程不再本地初始化 AIHub;
|
||||
// 3. 返回 ProClient 是因为现有 memory.Module 只需要 llmservice.Client,不需要完整 Service。
|
||||
func buildMemoryLLMClient() (*llmservice.Client, error) {
|
||||
aiHub, err := inits.InitEino()
|
||||
remoteService, err := llmclient.NewService(llmclient.ServiceConfig{
|
||||
ClientConfig: llmclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
|
||||
Target: viper.GetString("llm.rpc.target"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
},
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
llmService := llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
return llmService.ProClient(), nil
|
||||
return remoteService.ProClient(), nil
|
||||
}
|
||||
|
||||
// buildMemoryRAGRuntime 初始化 memory 检索与向量同步使用的 RAG Runtime。
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
notificationdao "github.com/LoveLosita/smartflow/backend/services/notification/dao"
|
||||
notificationrpc "github.com/LoveLosita/smartflow/backend/services/notification/rpc"
|
||||
notificationsv "github.com/LoveLosita/smartflow/backend/services/notification/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,13 +7,13 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/services/schedule/core/applyadapter"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulerpc "github.com/LoveLosita/smartflow/backend/services/schedule/rpc"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -36,7 +36,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize schedule cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -11,27 +11,21 @@ import (
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
activeschedulerclient "github.com/LoveLosita/smartflow/backend/client/activescheduler"
|
||||
agentclient "github.com/LoveLosita/smartflow/backend/client/agent"
|
||||
courseclient "github.com/LoveLosita/smartflow/backend/client/course"
|
||||
llmclient "github.com/LoveLosita/smartflow/backend/client/llm"
|
||||
memoryclient "github.com/LoveLosita/smartflow/backend/client/memory"
|
||||
notificationclient "github.com/LoveLosita/smartflow/backend/client/notification"
|
||||
scheduleclient "github.com/LoveLosita/smartflow/backend/client/schedule"
|
||||
taskclient "github.com/LoveLosita/smartflow/backend/client/task"
|
||||
taskclassclient "github.com/LoveLosita/smartflow/backend/client/taskclass"
|
||||
taskclassforumclient "github.com/LoveLosita/smartflow/backend/client/taskclassforum"
|
||||
tokenstoreclient "github.com/LoveLosita/smartflow/backend/client/tokenstore"
|
||||
userauthclient "github.com/LoveLosita/smartflow/backend/client/userauth"
|
||||
coreinit "github.com/LoveLosita/smartflow/backend/cmd/internal/coreinit"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||
gatewayactivescheduler "github.com/LoveLosita/smartflow/backend/gateway/client/activescheduler"
|
||||
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
|
||||
gatewaycourse "github.com/LoveLosita/smartflow/backend/gateway/client/course"
|
||||
gatewaymemory "github.com/LoveLosita/smartflow/backend/gateway/client/memory"
|
||||
gatewaynotification "github.com/LoveLosita/smartflow/backend/gateway/client/notification"
|
||||
gatewayschedule "github.com/LoveLosita/smartflow/backend/gateway/client/schedule"
|
||||
gatewaytask "github.com/LoveLosita/smartflow/backend/gateway/client/task"
|
||||
gatewaytaskclass "github.com/LoveLosita/smartflow/backend/gateway/client/taskclass"
|
||||
gatewayuserauth "github.com/LoveLosita/smartflow/backend/gateway/client/userauth"
|
||||
gatewayrouter "github.com/LoveLosita/smartflow/backend/gateway/router"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/inits"
|
||||
"github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
"github.com/LoveLosita/smartflow/backend/service"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
activeadapters "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/adapters"
|
||||
activeapplyadapter "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/applyadapter"
|
||||
activefeedbacklocate "github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/feedbacklocate"
|
||||
@@ -44,12 +38,26 @@ import (
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
agenttools "github.com/LoveLosita/smartflow/backend/services/agent/tools"
|
||||
"github.com/LoveLosita/smartflow/backend/services/agent/tools/web"
|
||||
coursedao "github.com/LoveLosita/smartflow/backend/services/course/dao"
|
||||
coursesv "github.com/LoveLosita/smartflow/backend/services/course/sv"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
"github.com/LoveLosita/smartflow/backend/services/memory"
|
||||
memorymodel "github.com/LoveLosita/smartflow/backend/services/memory/model"
|
||||
memoryobserve "github.com/LoveLosita/smartflow/backend/services/memory/observe"
|
||||
ragservice "github.com/LoveLosita/smartflow/backend/services/rag"
|
||||
ragconfig "github.com/LoveLosita/smartflow/backend/services/rag/config"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/services/runtime/eventsvc"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
scheduledao "github.com/LoveLosita/smartflow/backend/services/schedule/dao"
|
||||
schedulesv "github.com/LoveLosita/smartflow/backend/services/schedule/sv"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"github.com/spf13/viper"
|
||||
@@ -66,7 +74,7 @@ const (
|
||||
// 职责边界:
|
||||
// 1. 只负责保存启动期已经装配好的基础设施、仓储、服务和 HTTP handler;
|
||||
// 2. 不承载业务逻辑,业务仍然由 service / agent / memory 等领域模块负责;
|
||||
// 3. 不决定进程角色,api / worker / all 由 StartAPI、StartWorker、StartAll 选择启动哪些生命周期。
|
||||
// 3. 不决定进程角色,api / worker 由 StartAPI、StartWorker 选择启动哪些生命周期;StartAll 仅保留兼容别名。
|
||||
type appRuntime struct {
|
||||
db *gorm.DB
|
||||
redisClient *redis.Client
|
||||
@@ -75,9 +83,11 @@ type appRuntime struct {
|
||||
agentCache *dao.AgentCache
|
||||
manager *dao.RepoManager
|
||||
outboxRepo *outboxinfra.Repository
|
||||
limiter *pkg.RateLimiter
|
||||
limiter *ratelimit.RateLimiter
|
||||
handlers *api.ApiHandlers
|
||||
userAuthClient *gatewayuserauth.Client
|
||||
userAuthClient *userauthclient.Client
|
||||
forumClient *taskclassforumclient.Client
|
||||
tokenClient *tokenstoreclient.Client
|
||||
}
|
||||
|
||||
// loadConfig 锻炼?
|
||||
@@ -85,24 +95,20 @@ func loadConfig() error {
|
||||
return bootstrap.LoadConfig()
|
||||
}
|
||||
|
||||
// Start 保留历史兼容入口,当前默认等价于 StartAll。
|
||||
// Start 保留历史兼容入口,当前默认等价于 StartAPI。
|
||||
// 1. 兼容 backend/main.go 和旧部署命令。
|
||||
// 2. 不新增业务语义,只转发给 StartAll。
|
||||
// 2. 不新增业务语义,只转发给 StartAPI。
|
||||
// 3. 后续若全面切到独立 api/worker 启动,本入口只保留过渡兼容。
|
||||
func Start() {
|
||||
StartAll()
|
||||
StartAPI()
|
||||
}
|
||||
|
||||
// StartAll 启动当前仓库的完整运行态:HTTP API + 后台 worker。
|
||||
// 这仍然是迁移期的兼容装配,不是终态的“一个服务一个 main.go”模型。
|
||||
// StartAll 保留给历史入口与旧命令的兼容别名,当前语义与 StartAPI 完全一致。
|
||||
// 1. cmd/all 已移除,不再作为后端本地启动标准入口。
|
||||
// 2. 之所以暂时保留该函数,是为了避免仓库根兼容入口和旧脚本立刻失效。
|
||||
// 3. 后续若仓库根入口一并收口,可直接删除该兼容别名。
|
||||
func StartAll() {
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
runtime := mustBuildRuntime(ctx)
|
||||
defer runtime.close()
|
||||
|
||||
runtime.startWorkers(ctx)
|
||||
runtime.startHTTP(ctx)
|
||||
StartAPI()
|
||||
}
|
||||
|
||||
// StartAPI 只启动 Gin API 和其同步依赖,不启动后台 worker。
|
||||
@@ -154,23 +160,23 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
db, err := inits.ConnectCoreDB()
|
||||
db, err := coreinit.ConnectCoreDB()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to database: %w", err)
|
||||
}
|
||||
|
||||
rdb, err := inits.InitCoreRedis()
|
||||
rdb, err := coreinit.InitCoreRedis()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to redis: %w", err)
|
||||
}
|
||||
limiter := pkg.NewRateLimiter(rdb)
|
||||
limiter := ratelimit.NewRateLimiter(rdb)
|
||||
|
||||
// DAO 层初始化。
|
||||
cacheRepo := dao.NewCacheDAO(rdb)
|
||||
_ = db.Use(middleware.NewGormCachePlugin(cacheRepo))
|
||||
_ = db.Use(gormcache.NewGormCachePlugin(cacheRepo))
|
||||
|
||||
// Service 层初始化。
|
||||
userAuthClient, err := gatewayuserauth.NewClient(gatewayuserauth.ClientConfig{
|
||||
userAuthClient, err := userauthclient.NewClient(userauthclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("userauth.rpc.endpoints"),
|
||||
Target: viper.GetString("userauth.rpc.target"),
|
||||
Timeout: viper.GetDuration("userauth.rpc.timeout"),
|
||||
@@ -178,7 +184,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize userauth zrpc client: %w", err)
|
||||
}
|
||||
notificationClient, err := gatewaynotification.NewClient(gatewaynotification.ClientConfig{
|
||||
notificationClient, err := notificationclient.NewClient(notificationclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("notification.rpc.endpoints"),
|
||||
Target: viper.GetString("notification.rpc.target"),
|
||||
Timeout: viper.GetDuration("notification.rpc.timeout"),
|
||||
@@ -186,7 +192,23 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize notification zrpc client: %w", err)
|
||||
}
|
||||
scheduleClient, err := gatewayschedule.NewClient(gatewayschedule.ClientConfig{
|
||||
forumClient, err := taskclassforumclient.NewClient(taskclassforumclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskclassforum.rpc.endpoints"),
|
||||
Target: viper.GetString("taskclassforum.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskclassforum.rpc.timeout"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize taskclassforum zrpc client: %w", err)
|
||||
}
|
||||
tokenClient, err := tokenstoreclient.NewClient(tokenstoreclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("tokenstore.rpc.endpoints"),
|
||||
Target: viper.GetString("tokenstore.rpc.target"),
|
||||
Timeout: viper.GetDuration("tokenstore.rpc.timeout"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize tokenstore zrpc client: %w", err)
|
||||
}
|
||||
scheduleClient, err := scheduleclient.NewClient(scheduleclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("schedule.rpc.endpoints"),
|
||||
Target: viper.GetString("schedule.rpc.target"),
|
||||
Timeout: viper.GetDuration("schedule.rpc.timeout"),
|
||||
@@ -194,7 +216,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize schedule zrpc client: %w", err)
|
||||
}
|
||||
taskClient, err := gatewaytask.NewClient(gatewaytask.ClientConfig{
|
||||
taskClient, err := taskclient.NewClient(taskclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("task.rpc.endpoints"),
|
||||
Target: viper.GetString("task.rpc.target"),
|
||||
Timeout: viper.GetDuration("task.rpc.timeout"),
|
||||
@@ -202,7 +224,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize task zrpc client: %w", err)
|
||||
}
|
||||
taskClassClient, err := gatewaytaskclass.NewClient(gatewaytaskclass.ClientConfig{
|
||||
taskClassClient, err := taskclassclient.NewClient(taskclassclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
|
||||
Target: viper.GetString("taskClass.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskClass.rpc.timeout"),
|
||||
@@ -210,7 +232,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize task-class zrpc client: %w", err)
|
||||
}
|
||||
courseClient, err := gatewaycourse.NewClient(gatewaycourse.ClientConfig{
|
||||
courseClient, err := courseclient.NewClient(courseclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("course.rpc.endpoints"),
|
||||
Target: viper.GetString("course.rpc.target"),
|
||||
Timeout: viper.GetDuration("course.rpc.timeout"),
|
||||
@@ -219,7 +241,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize course zrpc client: %w", err)
|
||||
}
|
||||
memoryClient, err := gatewaymemory.NewClient(gatewaymemory.ClientConfig{
|
||||
memoryClient, err := memoryclient.NewClient(memoryclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("memory.rpc.endpoints"),
|
||||
Target: viper.GetString("memory.rpc.target"),
|
||||
Timeout: viper.GetDuration("memory.rpc.timeout"),
|
||||
@@ -227,7 +249,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize memory zrpc client: %w", err)
|
||||
}
|
||||
agentRPCClient, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
|
||||
agentRPCClient, err := agentclient.NewClient(agentclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
|
||||
Target: viper.GetString("agent.rpc.target"),
|
||||
Timeout: viper.GetDuration("agent.rpc.timeout"),
|
||||
@@ -235,7 +257,7 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize agent zrpc client: %w", err)
|
||||
}
|
||||
activeSchedulerClient, err := gatewayactivescheduler.NewClient(gatewayactivescheduler.ClientConfig{
|
||||
activeSchedulerClient, err := activeschedulerclient.NewClient(activeschedulerclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("activeScheduler.rpc.endpoints"),
|
||||
Target: viper.GetString("activeScheduler.rpc.target"),
|
||||
Timeout: viper.GetDuration("activeScheduler.rpc.timeout"),
|
||||
@@ -251,16 +273,17 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
if shouldBuildGatewayAgentFallback() {
|
||||
log.Println("Gateway agent RPC fallback is enabled; building local AgentService compatibility path")
|
||||
|
||||
aiHub, err := inits.InitEino()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize Eino: %w", err)
|
||||
}
|
||||
llmService := llmservice.New(llmservice.Options{
|
||||
AIHub: aiHub,
|
||||
APIKey: os.Getenv("ARK_API_KEY"),
|
||||
BaseURL: viper.GetString("agent.baseURL"),
|
||||
llmService, err := llmclient.NewService(llmclient.ServiceConfig{
|
||||
ClientConfig: llmclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("llm.rpc.endpoints"),
|
||||
Target: viper.GetString("llm.rpc.target"),
|
||||
Timeout: viper.GetDuration("llm.rpc.timeout"),
|
||||
},
|
||||
CourseVisionModel: viper.GetString("courseImport.visionModel"),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize llm zrpc client: %w", err)
|
||||
}
|
||||
|
||||
ragService, err := buildRAGService(ctx)
|
||||
if err != nil {
|
||||
@@ -273,8 +296,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
|
||||
agentCacheRepo = dao.NewAgentCache(rdb)
|
||||
taskRepo := dao.NewTaskDAO(db)
|
||||
taskServiceRepo := taskdao.NewTaskDAO(db)
|
||||
taskClassRepo := dao.NewTaskClassDAO(db)
|
||||
scheduleRepo := dao.NewScheduleDAO(db)
|
||||
scheduleServiceRepo := scheduledao.NewScheduleDAO(db)
|
||||
manager = dao.NewManager(db)
|
||||
agentRepo = dao.NewAgentDAO(db)
|
||||
outboxRepo = outboxinfra.NewRepository(db)
|
||||
@@ -286,9 +310,9 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
return nil, fmt.Errorf("failed to register task outbox route: %w", err)
|
||||
}
|
||||
taskOutboxPublisher := buildTaskOutboxPublisher(outboxRepo)
|
||||
taskSv := service.NewTaskService(taskRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskSv := tasksv.NewTaskService(taskServiceRepo, cacheRepo, taskOutboxPublisher)
|
||||
taskSv.SetActiveScheduleDAO(manager.ActiveSchedule)
|
||||
scheduleService := service.NewScheduleService(scheduleRepo, taskClassRepo, manager, cacheRepo)
|
||||
scheduleService := schedulesv.NewScheduleService(scheduleServiceRepo, taskClassRepo, manager, cacheRepo)
|
||||
agentService = agentsv.NewAgentService(
|
||||
llmService,
|
||||
agentRepo,
|
||||
@@ -373,6 +397,8 @@ func buildRuntime(ctx context.Context) (*appRuntime, error) {
|
||||
limiter: limiter,
|
||||
handlers: handlers,
|
||||
userAuthClient: userAuthClient,
|
||||
forumClient: forumClient,
|
||||
tokenClient: tokenClient,
|
||||
}
|
||||
return runtime, nil
|
||||
}
|
||||
@@ -488,13 +514,13 @@ func (p *repositoryOutboxPublisher) Publish(ctx context.Context, req outboxinfra
|
||||
return err
|
||||
}
|
||||
|
||||
func buildCourseService(llmService *llmservice.Service, courseRepo *dao.CourseDAO, scheduleRepo *dao.ScheduleDAO) *service.CourseService {
|
||||
func buildCourseService(llmService *llmservice.Service, courseRepo *coursedao.CourseDAO, scheduleRepo *dao.ScheduleDAO) *coursesv.CourseService {
|
||||
courseImageResponsesClient := llmService.CourseImageResponsesClient()
|
||||
return service.NewCourseService(
|
||||
return coursesv.NewCourseService(
|
||||
courseRepo,
|
||||
scheduleRepo,
|
||||
courseImageResponsesClient,
|
||||
service.NewCourseImageParseConfig(
|
||||
coursesv.NewCourseImageParseConfig(
|
||||
viper.GetInt64("courseImport.maxImageBytes"),
|
||||
viper.GetInt("courseImport.maxTokens"),
|
||||
),
|
||||
@@ -827,7 +853,7 @@ func buildAPIHandlers(
|
||||
courseClient ports.CourseCommandClient,
|
||||
scheduleClient ports.ScheduleCommandClient,
|
||||
agentService *agentsv.AgentService,
|
||||
agentRPCClient *gatewayagent.Client,
|
||||
agentRPCClient *agentclient.Client,
|
||||
memoryClient ports.MemoryCommandClient,
|
||||
activeSchedulerClient ports.ActiveSchedulerCommandClient,
|
||||
notificationClient ports.NotificationCommandClient,
|
||||
@@ -854,7 +880,7 @@ func (r *appRuntime) startWorkers(ctx context.Context) {
|
||||
}
|
||||
|
||||
func (r *appRuntime) startHTTP(ctx context.Context) {
|
||||
router := gatewayrouter.RegisterRouters(r.handlers, r.userAuthClient, r.cacheRepo, r.limiter)
|
||||
router := gatewayrouter.RegisterRouters(r.handlers, r.userAuthClient, r.forumClient, r.tokenClient, r.cacheRepo, r.limiter)
|
||||
gatewayrouter.StartEngine(ctx, router)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,12 +7,12 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
taskclassdao "github.com/LoveLosita/smartflow/backend/services/task_class/dao"
|
||||
taskclassrpc "github.com/LoveLosita/smartflow/backend/services/task_class/rpc"
|
||||
taskclasssv "github.com/LoveLosita/smartflow/backend/services/task_class/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -35,7 +35,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize task-class cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
@@ -7,14 +7,14 @@ import (
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
rootdao "github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
taskdao "github.com/LoveLosita/smartflow/backend/services/task/dao"
|
||||
taskrpc "github.com/LoveLosita/smartflow/backend/services/task/rpc"
|
||||
tasksv "github.com/LoveLosita/smartflow/backend/services/task/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
gormcache "github.com/LoveLosita/smartflow/backend/shared/infra/gormcache"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
@@ -37,7 +37,7 @@ func main() {
|
||||
defer redisClient.Close()
|
||||
|
||||
cacheRepo := rootdao.NewCacheDAO(redisClient)
|
||||
if err := db.Use(rootmiddleware.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
if err := db.Use(gormcache.NewGormCachePlugin(cacheRepo)); err != nil {
|
||||
log.Fatalf("failed to initialize task cache deleter: %v", err)
|
||||
}
|
||||
|
||||
|
||||
98
backend/cmd/taskclassforum/main.go
Normal file
98
backend/cmd/taskclassforum/main.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
taskclassclient "github.com/LoveLosita/smartflow/backend/client/taskclass"
|
||||
"github.com/LoveLosita/smartflow/backend/services/taskclassforum/adapter"
|
||||
forumdao "github.com/LoveLosita/smartflow/backend/services/taskclassforum/dao"
|
||||
forumrpc "github.com/LoveLosita/smartflow/backend/services/taskclassforum/rpc"
|
||||
forumsv "github.com/LoveLosita/smartflow/backend/services/taskclassforum/sv"
|
||||
sharedevents "github.com/LoveLosita/smartflow/backend/shared/events"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
redisinfra "github.com/LoveLosita/smartflow/backend/shared/infra/redis"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := bootstrap.LoadConfig(); err != nil {
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
db, err := forumdao.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to connect taskclassforum database: %v", err)
|
||||
}
|
||||
if err := registerForumRewardOutboxRoutes(); err != nil {
|
||||
log.Fatalf("failed to register taskclassforum outbox routes: %v", err)
|
||||
}
|
||||
|
||||
taskClassClient, err := taskclassclient.NewClient(taskclassclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("taskClass.rpc.endpoints"),
|
||||
Target: viper.GetString("taskClass.rpc.target"),
|
||||
Timeout: viper.GetDuration("taskClass.rpc.timeout"),
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize task-class zrpc client: %v", err)
|
||||
}
|
||||
|
||||
// 1. 论坛服务只依赖 TaskClass 快照端口,不直接操作 task_classes / task_items 物理表。
|
||||
// 2. 当前实现通过 task-class zrpc 读取/创建副本,保持 dev 主干的微服务边界不回退。
|
||||
// 3. 后续若 task-class 契约扩展,只需替换 adapter 内部映射,不需要改论坛业务层。
|
||||
taskClassPort := adapter.NewTaskClassRPCAdapter(taskClassClient)
|
||||
eventPublisher := outboxinfra.NewRepositoryPublisher(outboxinfra.NewRepository(db), viper.GetInt("kafka.maxRetry"))
|
||||
|
||||
commentTreeCache := forumsv.CommentTreeCachePort(nil)
|
||||
if rdb, redisErr := redisinfra.OpenRedisFromConfig(); redisErr != nil {
|
||||
log.Printf("taskclassforum 评论树缓存已降级关闭,Redis 连接失败: %v", redisErr)
|
||||
} else {
|
||||
defer rdb.Close()
|
||||
commentTreeCache = forumdao.NewCommentTreeCache(rdb)
|
||||
}
|
||||
|
||||
svc := forumsv.New(forumsv.Options{
|
||||
DB: db,
|
||||
TaskClassPort: taskClassPort,
|
||||
EventPublisher: eventPublisher,
|
||||
CommentTreeCache: commentTreeCache,
|
||||
})
|
||||
|
||||
server, listenOn, err := forumrpc.NewServer(forumrpc.ServerOptions{
|
||||
ListenOn: viper.GetString("taskclassforum.rpc.listenOn"),
|
||||
Timeout: viper.GetDuration("taskclassforum.rpc.timeout"),
|
||||
Service: svc,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build taskclassforum zrpc server: %v", err)
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
go func() {
|
||||
log.Printf("taskclassforum zrpc service starting on %s", listenOn)
|
||||
server.Start()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
log.Println("taskclassforum service stopping")
|
||||
}
|
||||
|
||||
// registerForumRewardOutboxRoutes 负责让独立 taskclassforum RPC 进程认识奖励事件的落表归属。
|
||||
//
|
||||
// 步骤说明:
|
||||
// 1. 点赞、导入事件都由 token-store 消费并写 token_grants,所以事件路由归属 token-store;
|
||||
// 2. taskclassforum 进程只负责发布事件,不启动 consumer,也不直接写奖励账本;
|
||||
// 3. 若注册失败直接阻止启动,避免后续点赞/导入看似成功但 outbox 永远无法入队。
|
||||
func registerForumRewardOutboxRoutes() error {
|
||||
if err := outboxinfra.RegisterEventService(sharedevents.ForumPostLikedEventType, outboxinfra.ServiceTokenStore); err != nil {
|
||||
return err
|
||||
}
|
||||
return outboxinfra.RegisterEventService(sharedevents.ForumPostImportedEventType, outboxinfra.ServiceTokenStore)
|
||||
}
|
||||
82
backend/cmd/tokenstore/main.go
Normal file
82
backend/cmd/tokenstore/main.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
tokenstoredao "github.com/LoveLosita/smartflow/backend/services/tokenstore/dao"
|
||||
tokenstorerpc "github.com/LoveLosita/smartflow/backend/services/tokenstore/rpc"
|
||||
tokenstoresv "github.com/LoveLosita/smartflow/backend/services/tokenstore/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/shared/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/shared/infra/outbox"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := bootstrap.LoadConfig(); err != nil {
|
||||
log.Fatalf("failed to load config: %v", err)
|
||||
}
|
||||
|
||||
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
db, err := tokenstoredao.OpenDBFromConfig()
|
||||
if err != nil {
|
||||
log.Fatalf("failed to connect tokenstore database: %v", err)
|
||||
}
|
||||
|
||||
var creditCache *tokenstoredao.CreditCacheDAO
|
||||
rdb, err := tokenstoredao.OpenRedisFromConfig()
|
||||
if err != nil {
|
||||
log.Printf("tokenstore redis is unavailable, credit cache disabled: %v", err)
|
||||
} else {
|
||||
creditCache = tokenstoredao.NewCreditCacheDAO(rdb)
|
||||
log.Println("Tokenstore credit cache enabled")
|
||||
}
|
||||
|
||||
svc := tokenstoresv.New(tokenstoresv.Options{
|
||||
DB: db,
|
||||
CreditCache: creditCache,
|
||||
})
|
||||
|
||||
outboxRepo := outboxinfra.NewRepository(db)
|
||||
eventBus, err := outboxinfra.NewEventBus(outboxRepo, kafkabus.LoadConfig())
|
||||
if err != nil {
|
||||
log.Fatalf("failed to initialize tokenstore outbox bus: %v", err)
|
||||
}
|
||||
if eventBus != nil {
|
||||
if err := tokenstoresv.RegisterForumRewardHandlers(eventBus, outboxRepo, svc); err != nil {
|
||||
log.Fatalf("failed to register tokenstore outbox handlers: %v", err)
|
||||
}
|
||||
if err := tokenstoresv.RegisterCreditChargeHandlers(eventBus, outboxRepo, svc); err != nil {
|
||||
log.Fatalf("failed to register credit charge handlers: %v", err)
|
||||
}
|
||||
eventBus.Start(ctx)
|
||||
defer eventBus.Close()
|
||||
log.Println("Tokenstore outbox consumer started")
|
||||
} else {
|
||||
log.Println("Tokenstore outbox consumer is disabled")
|
||||
}
|
||||
|
||||
server, listenOn, err := tokenstorerpc.NewServer(tokenstorerpc.ServerOptions{
|
||||
ListenOn: viper.GetString("tokenstore.rpc.listenOn"),
|
||||
Timeout: viper.GetDuration("tokenstore.rpc.timeout"),
|
||||
Service: svc,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatalf("failed to build tokenstore zrpc server: %v", err)
|
||||
}
|
||||
defer server.Stop()
|
||||
|
||||
go func() {
|
||||
log.Printf("tokenstore zrpc service starting on %s", listenOn)
|
||||
server.Start()
|
||||
}()
|
||||
|
||||
<-ctx.Done()
|
||||
log.Println("tokenstore service stopping")
|
||||
}
|
||||
@@ -3,10 +3,10 @@ package main
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/bootstrap"
|
||||
userauthdao "github.com/LoveLosita/smartflow/backend/services/userauth/dao"
|
||||
userauthrpc "github.com/LoveLosita/smartflow/backend/services/userauth/rpc"
|
||||
userauthsv "github.com/LoveLosita/smartflow/backend/services/userauth/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/infra/bootstrap"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
|
||||
260
backend/config.docker.yaml
Normal file
260
backend/config.docker.yaml
Normal file
@@ -0,0 +1,260 @@
|
||||
# SmartFlow 容器化部署配置模板。
|
||||
#
|
||||
# 说明:
|
||||
# 1. 该文件面向 Docker Compose 内部网络,所有依赖地址都改为服务名。
|
||||
# 2. 本地开发仍可继续使用 backend/config.yaml,不与该文件冲突。
|
||||
# 3. 正式部署前请至少替换 JWT 密钥、模型密钥与外部服务凭证。
|
||||
|
||||
server:
|
||||
port: 8080
|
||||
|
||||
database:
|
||||
host: mysql
|
||||
port: 3306
|
||||
user: smartflow_user
|
||||
password: "smartflow_password_456"
|
||||
dbname: "smartflow"
|
||||
|
||||
jwt:
|
||||
accessSecret: "change_me_access_secret"
|
||||
refreshSecret: "change_me_refresh_secret"
|
||||
accessTokenExpire: 15min
|
||||
refreshTokenExpire: 7d
|
||||
|
||||
geetest:
|
||||
captchaID: "80eb5b33de9fba62c69f5e57e36d9638"
|
||||
privateKey: "991f9ad68191cbf6b823257ac67372f3"
|
||||
|
||||
redis:
|
||||
host: redis
|
||||
port: 6379
|
||||
password: "redis_password_789"
|
||||
|
||||
userauth:
|
||||
allowRegister: false
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9081"
|
||||
endpoints:
|
||||
- "userauth:9081"
|
||||
timeout: 2s
|
||||
|
||||
taskclassforum:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9090"
|
||||
endpoints:
|
||||
- "taskclassforum:9090"
|
||||
timeout: 2s
|
||||
|
||||
tokenstore:
|
||||
reward:
|
||||
forumLikeAmount: 1
|
||||
forumImportAmount: 5
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9095"
|
||||
endpoints:
|
||||
- "tokenstore:9095"
|
||||
timeout: 2s
|
||||
|
||||
llm:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9096"
|
||||
endpoints:
|
||||
- "llm:9096"
|
||||
timeout: 0s
|
||||
|
||||
kafka:
|
||||
enabled: true
|
||||
brokers:
|
||||
- "kafka:9092"
|
||||
topic: "smartflow.agent.outbox"
|
||||
groupID: "smartflow-agent-outbox-consumer"
|
||||
retryScanInterval: 1s
|
||||
retryBatchSize: 100
|
||||
maxRetry: 20
|
||||
|
||||
outbox:
|
||||
services:
|
||||
agent:
|
||||
topic: "smartflow.agent.outbox"
|
||||
groupID: "smartflow-agent-outbox-consumer"
|
||||
table: "agent_outbox_messages"
|
||||
task:
|
||||
topic: "smartflow.task.outbox"
|
||||
groupID: "smartflow-task-outbox-consumer"
|
||||
table: "task_outbox_messages"
|
||||
memory:
|
||||
topic: "smartflow.memory.outbox"
|
||||
groupID: "smartflow-memory-outbox-consumer"
|
||||
table: "memory_outbox_messages"
|
||||
active-scheduler:
|
||||
topic: "smartflow.active-scheduler.outbox"
|
||||
groupID: "smartflow-active-scheduler-outbox-consumer"
|
||||
table: "active_scheduler_outbox_messages"
|
||||
notification:
|
||||
topic: "smartflow.notification.outbox"
|
||||
groupID: "smartflow-notification-outbox-consumer"
|
||||
table: "notification_outbox_messages"
|
||||
taskclass-forum:
|
||||
topic: "smartflow.taskclass-forum.outbox"
|
||||
groupID: "smartflow-taskclass-forum-outbox-consumer"
|
||||
table: "taskclass_forum_outbox_messages"
|
||||
llm:
|
||||
topic: "smartflow.llm.outbox"
|
||||
groupID: "smartflow-llm-outbox-consumer"
|
||||
table: "llm_outbox_messages"
|
||||
token-store:
|
||||
topic: "smartflow.token-store.outbox"
|
||||
groupID: "smartflow-token-store-outbox-consumer"
|
||||
table: "token_store_outbox_messages"
|
||||
|
||||
notification:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9082"
|
||||
endpoints:
|
||||
- "notification:9082"
|
||||
timeout: 6s
|
||||
frontendBaseURL: "https://smartflow.example.com"
|
||||
retryScanEvery: 1m
|
||||
retryBatchSize: 50
|
||||
|
||||
cors:
|
||||
allowedOrigins:
|
||||
- "http://localhost:5173"
|
||||
- "https://smartflow.example.com"
|
||||
|
||||
schedule:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9084"
|
||||
endpoints:
|
||||
- "schedule:9084"
|
||||
timeout: 6s
|
||||
|
||||
task:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9085"
|
||||
endpoints:
|
||||
- "task:9085"
|
||||
timeout: 6s
|
||||
|
||||
taskClass:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9086"
|
||||
endpoints:
|
||||
- "task-class:9086"
|
||||
timeout: 6s
|
||||
|
||||
course:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9087"
|
||||
endpoints:
|
||||
- "course:9087"
|
||||
timeout: 5m
|
||||
|
||||
activeScheduler:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9083"
|
||||
endpoints:
|
||||
- "active-scheduler:9083"
|
||||
timeout: 8s
|
||||
jobScanEvery: 1m
|
||||
jobScanLimit: 50
|
||||
|
||||
time:
|
||||
zone: "Asia/Shanghai"
|
||||
semesterStartDate: "2026-03-02"
|
||||
semesterEndDate: "2026-07-19"
|
||||
|
||||
agent:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9089"
|
||||
endpoints:
|
||||
- "agent:9089"
|
||||
timeout: 0s
|
||||
chat:
|
||||
enabled: true
|
||||
api:
|
||||
enabled: true
|
||||
liteModel: "doubao-seed-2-0-code-preview-260215"
|
||||
proModel: "doubao-seed-2-0-code-preview-260215"
|
||||
maxModel: "doubao-seed-2-0-code-preview-260215"
|
||||
baseURL: "https://ark.cn-beijing.volces.com/api/v3"
|
||||
thinking:
|
||||
plan: true
|
||||
execute: true
|
||||
deliver: false
|
||||
memory: false
|
||||
|
||||
courseImport:
|
||||
visionModel: ""
|
||||
maxImageBytes: 5242880
|
||||
maxTokens: 8192
|
||||
|
||||
rag:
|
||||
enabled: true
|
||||
store: "milvus"
|
||||
topK: 8
|
||||
threshold: 0.55
|
||||
retrieve:
|
||||
timeoutMs: 1500
|
||||
ingest:
|
||||
chunkSize: 400
|
||||
chunkOverlap: 80
|
||||
embed:
|
||||
provider: "eino"
|
||||
model: "doubao-embedding-vision-251215"
|
||||
baseURL: "https://ark.cn-beijing.volces.com/api/v3"
|
||||
timeoutMs: 1200
|
||||
dimension: 1024
|
||||
reranker:
|
||||
enabled: false
|
||||
provider: "noop"
|
||||
timeoutMs: 1200
|
||||
milvus:
|
||||
address: "http://milvus-standalone:19530"
|
||||
token: "root:Milvus"
|
||||
dbName: ""
|
||||
collectionName: "smartflow_rag_chunks"
|
||||
metricType: "COSINE"
|
||||
requestTimeoutMs: 1500
|
||||
|
||||
memory:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9088"
|
||||
endpoints:
|
||||
- "memory:9088"
|
||||
timeout: 6s
|
||||
enabled: true
|
||||
rag:
|
||||
enabled: true
|
||||
read:
|
||||
mode: legacy
|
||||
constraintLimit: 5
|
||||
preferenceLimit: 5
|
||||
factLimit: 5
|
||||
inject:
|
||||
renderMode: flat
|
||||
prompt:
|
||||
extract: ""
|
||||
decision: ""
|
||||
threshold: 0.55
|
||||
enableReranker: false
|
||||
llm:
|
||||
temperature: 0.1
|
||||
topP: 0.2
|
||||
job:
|
||||
maxRetry: 6
|
||||
worker:
|
||||
pollEvery: 2s
|
||||
claimBatch: 1
|
||||
decision:
|
||||
enabled: true
|
||||
candidateTopK: 5
|
||||
candidateMinScore: 0.6
|
||||
fallbackMode: legacy_add
|
||||
write:
|
||||
mode: legacy
|
||||
minConfidence: 0.5
|
||||
|
||||
websearch:
|
||||
provider: bocha
|
||||
apiKey: ""
|
||||
@@ -23,6 +23,11 @@ jwt:
|
||||
accessTokenExpire: 15min
|
||||
refreshTokenExpire: 7d
|
||||
|
||||
# 极验行为验证配置。
|
||||
geetest:
|
||||
captchaID: "put_your_geetest_captcha_id_here"
|
||||
privateKey: "put_your_geetest_private_key_here"
|
||||
|
||||
# Redis 配置。
|
||||
redis:
|
||||
host: localhost
|
||||
@@ -31,12 +36,40 @@ redis:
|
||||
|
||||
# user/auth zrpc 独立服务与网关客户端配置。
|
||||
userauth:
|
||||
allowRegister: false
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9081"
|
||||
endpoints:
|
||||
- "127.0.0.1:9081"
|
||||
timeout: 2s
|
||||
|
||||
# 计划广场 zrpc 独立服务与网关客户端配置。
|
||||
taskclassforum:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9090"
|
||||
endpoints:
|
||||
- "127.0.0.1:9090"
|
||||
timeout: 2s
|
||||
|
||||
# Token 商店 zrpc 独立服务与网关客户端配置。
|
||||
tokenstore:
|
||||
reward:
|
||||
forumLikeAmount: 1
|
||||
forumImportAmount: 5
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9095"
|
||||
endpoints:
|
||||
- "127.0.0.1:9095"
|
||||
timeout: 2s
|
||||
|
||||
# LLM zrpc 独立服务与各业务服务客户端配置。
|
||||
llm:
|
||||
rpc:
|
||||
listenOn: "0.0.0.0:9096"
|
||||
endpoints:
|
||||
- "127.0.0.1:9096"
|
||||
timeout: 0s
|
||||
|
||||
# Kafka outbox 事件总线配置。
|
||||
kafka:
|
||||
enabled: true
|
||||
@@ -48,6 +81,41 @@ kafka:
|
||||
retryBatchSize: 100
|
||||
maxRetry: 20
|
||||
|
||||
outbox:
|
||||
services:
|
||||
agent:
|
||||
topic: "smartflow.agent.outbox"
|
||||
groupID: "smartflow-agent-outbox-consumer"
|
||||
table: "agent_outbox_messages"
|
||||
task:
|
||||
topic: "smartflow.task.outbox"
|
||||
groupID: "smartflow-task-outbox-consumer"
|
||||
table: "task_outbox_messages"
|
||||
memory:
|
||||
topic: "smartflow.memory.outbox"
|
||||
groupID: "smartflow-memory-outbox-consumer"
|
||||
table: "memory_outbox_messages"
|
||||
active-scheduler:
|
||||
topic: "smartflow.active-scheduler.outbox"
|
||||
groupID: "smartflow-active-scheduler-outbox-consumer"
|
||||
table: "active_scheduler_outbox_messages"
|
||||
notification:
|
||||
topic: "smartflow.notification.outbox"
|
||||
groupID: "smartflow-notification-outbox-consumer"
|
||||
table: "notification_outbox_messages"
|
||||
taskclass-forum:
|
||||
topic: "smartflow.taskclass-forum.outbox"
|
||||
groupID: "smartflow-taskclass-forum-outbox-consumer"
|
||||
table: "taskclass_forum_outbox_messages"
|
||||
llm:
|
||||
topic: "smartflow.llm.outbox"
|
||||
groupID: "smartflow-llm-outbox-consumer"
|
||||
table: "llm_outbox_messages"
|
||||
token-store:
|
||||
topic: "smartflow.token-store.outbox"
|
||||
groupID: "smartflow-token-store-outbox-consumer"
|
||||
table: "token_store_outbox_messages"
|
||||
|
||||
# 通知投递配置。
|
||||
notification:
|
||||
rpc:
|
||||
@@ -89,7 +157,7 @@ course:
|
||||
listenOn: "0.0.0.0:9087"
|
||||
endpoints:
|
||||
- "127.0.0.1:9087"
|
||||
timeout: 10s
|
||||
timeout: 5m
|
||||
|
||||
# 主动调度服务配置。
|
||||
activeScheduler:
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/activescheduler"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -11,10 +11,10 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
gatewayagent "github.com/LoveLosita/smartflow/backend/gateway/client/agent"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
agentclient "github.com/LoveLosita/smartflow/backend/client/agent"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
agentsv "github.com/LoveLosita/smartflow/backend/services/agent/sv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
agentcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/agent"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/google/uuid"
|
||||
@@ -30,7 +30,7 @@ const (
|
||||
|
||||
type AgentHandler struct {
|
||||
svc *agentsv.AgentService
|
||||
rpcClient *gatewayagent.Client
|
||||
rpcClient *agentclient.Client
|
||||
rpcClientMu sync.Mutex
|
||||
}
|
||||
|
||||
@@ -48,7 +48,7 @@ func NewAgentHandler(svc *agentsv.AgentService) *AgentHandler {
|
||||
// 2. agent RPC 作为 chat stream 与非 chat /agent/* 查询/命令的服务间通道;
|
||||
// 3. svc 只用于 RPC 开关关闭时的迁移期 fallback,当前默认可为 nil;
|
||||
// 4. rpcClient 为空时允许按配置懒加载,避免测试和旧装配必须提前构造 client。
|
||||
func NewAgentHandlerWithRPC(svc *agentsv.AgentService, rpcClient *gatewayagent.Client) *AgentHandler {
|
||||
func NewAgentHandlerWithRPC(svc *agentsv.AgentService, rpcClient *agentclient.Client) *AgentHandler {
|
||||
return &AgentHandler{
|
||||
svc: svc,
|
||||
rpcClient: rpcClient,
|
||||
@@ -302,7 +302,7 @@ func writeAgentSSEError(w io.Writer, err error) {
|
||||
_ = writeSSEData(w, "[DONE]")
|
||||
}
|
||||
|
||||
func (api *AgentHandler) getAgentRPCClient() (*gatewayagent.Client, error) {
|
||||
func (api *AgentHandler) getAgentRPCClient() (*agentclient.Client, error) {
|
||||
if api == nil {
|
||||
return nil, errors.New("agent handler is not initialized")
|
||||
}
|
||||
@@ -314,7 +314,7 @@ func (api *AgentHandler) getAgentRPCClient() (*gatewayagent.Client, error) {
|
||||
return api.rpcClient, nil
|
||||
}
|
||||
|
||||
client, err := gatewayagent.NewClient(gatewayagent.ClientConfig{
|
||||
client, err := agentclient.NewClient(agentclient.ClientConfig{
|
||||
Endpoints: viper.GetStringSlice("agent.rpc.endpoints"),
|
||||
Target: viper.GetString("agent.rpc.target"),
|
||||
Timeout: viper.GetDuration("agent.rpc.timeout"),
|
||||
|
||||
@@ -9,13 +9,14 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
coursecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/course"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const courseRequestTimeout = 10 * time.Second
|
||||
// 课表导入与校验可能涉及较多课程展开与冲突检测,统一放宽到 5 分钟,避免网关提前超时。
|
||||
const courseRequestTimeout = 5 * time.Minute
|
||||
|
||||
type CourseHandler struct {
|
||||
client ports.CourseCommandClient
|
||||
@@ -116,6 +117,7 @@ func (sa *CourseHandler) ParseCourseTableImage(c *gin.Context) {
|
||||
defer cancel()
|
||||
|
||||
rawDraft, err := sa.client.ParseCourseTableImage(ctx, coursecontracts.CourseImageParseRequest{
|
||||
UserID: userID,
|
||||
Filename: fileHeader.Filename,
|
||||
MIMEType: fileHeader.Header.Get("Content-Type"),
|
||||
ImageBytes: imageBytes,
|
||||
|
||||
434
backend/gateway/api/forumapi/handler.go
Normal file
434
backend/gateway/api/forumapi/handler.go
Normal file
@@ -0,0 +1,434 @@
|
||||
package forumapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclassforum"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const (
|
||||
requestTimeout = 2 * time.Second
|
||||
forumLikeRewardAmount = int64(1)
|
||||
forumImportRewardAmount = int64(5)
|
||||
rewardHintStatusActive = "rule_active"
|
||||
)
|
||||
|
||||
type ForumClient interface {
|
||||
ListPosts(ctx context.Context, actorUserID uint64, page int, pageSize int, sort string, keyword string, tag string) ([]contracts.ForumPostBrief, contracts.PageResult, error)
|
||||
ListTags(ctx context.Context, actorUserID uint64, limit int) ([]contracts.ForumTagItem, error)
|
||||
CreatePost(ctx context.Context, req contracts.CreateForumPostRequest) (*contracts.ForumPostBrief, error)
|
||||
GetPost(ctx context.Context, actorUserID uint64, postID uint64) (*contracts.ForumPostDetail, error)
|
||||
LikePost(ctx context.Context, actorUserID uint64, postID uint64) (contracts.ForumPostCounters, contracts.ForumPostViewerState, error)
|
||||
UnlikePost(ctx context.Context, actorUserID uint64, postID uint64) (contracts.ForumPostCounters, contracts.ForumPostViewerState, error)
|
||||
ListComments(ctx context.Context, actorUserID uint64, postID uint64, page int, pageSize int, sort string) ([]contracts.ForumCommentNode, contracts.PageResult, error)
|
||||
CreateComment(ctx context.Context, req contracts.CreateForumCommentRequest) (*contracts.ForumCommentNode, error)
|
||||
DeleteComment(ctx context.Context, actorUserID uint64, commentID uint64) (*contracts.DeleteForumCommentResult, error)
|
||||
ImportPost(ctx context.Context, req contracts.ImportForumPostRequest) (*contracts.ImportForumPostResult, error)
|
||||
}
|
||||
|
||||
type Handler struct {
|
||||
client ForumClient
|
||||
}
|
||||
|
||||
func NewHandler(client ForumClient) *Handler {
|
||||
return &Handler{client: client}
|
||||
}
|
||||
|
||||
type pageEnvelope[T any] struct {
|
||||
Items []T `json:"items"`
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"page_size"`
|
||||
Total int `json:"total"`
|
||||
HasMore bool `json:"has_more"`
|
||||
}
|
||||
|
||||
type interactionEnvelope struct {
|
||||
PostID uint64 `json:"post_id"`
|
||||
Liked bool `json:"liked"`
|
||||
LikeCount int64 `json:"like_count"`
|
||||
RewardHint *rewardHint `json:"reward_hint,omitempty"`
|
||||
}
|
||||
|
||||
type rewardHint struct {
|
||||
Receiver string `json:"receiver"`
|
||||
Status string `json:"status"`
|
||||
Amount int64 `json:"amount"`
|
||||
}
|
||||
|
||||
type nextAction struct {
|
||||
Type string `json:"type"`
|
||||
TaskClassID uint64 `json:"task_class_id"`
|
||||
}
|
||||
|
||||
type importEnvelope struct {
|
||||
ImportID uint64 `json:"import_id"`
|
||||
PostID uint64 `json:"post_id"`
|
||||
NewTaskClassID uint64 `json:"new_task_class_id"`
|
||||
TaskClassTitle string `json:"task_class_title"`
|
||||
ImportCount int64 `json:"import_count"`
|
||||
RewardHint rewardHint `json:"reward_hint"`
|
||||
NextAction nextAction `json:"next_action"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
}
|
||||
|
||||
type deleteCommentEnvelope struct {
|
||||
CommentID uint64 `json:"comment_id"`
|
||||
Status string `json:"status"`
|
||||
Content string `json:"content"`
|
||||
DeletedAt *string `json:"deleted_at"`
|
||||
}
|
||||
|
||||
type createPostBody struct {
|
||||
TaskClassID uint64 `json:"task_class_id"`
|
||||
Title string `json:"title"`
|
||||
Summary string `json:"summary"`
|
||||
Tags []string `json:"tags"`
|
||||
}
|
||||
|
||||
type createCommentBody struct {
|
||||
Content string `json:"content"`
|
||||
ParentCommentID *uint64 `json:"parent_comment_id"`
|
||||
}
|
||||
|
||||
type importPostBody struct {
|
||||
TargetTitle string `json:"target_title"`
|
||||
}
|
||||
|
||||
func (h *Handler) ListPosts(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
pageValue, ok := intQuery(c, "page")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
pageSize, ok := intQuery(c, "page_size")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
items, page, err := client.ListPosts(
|
||||
ctx,
|
||||
currentUserID(c),
|
||||
pageValue,
|
||||
pageSize,
|
||||
c.Query("sort"),
|
||||
c.Query("keyword"),
|
||||
c.Query("tag"),
|
||||
)
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, newPageEnvelope(items, page)))
|
||||
}
|
||||
|
||||
func (h *Handler) ListTags(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
limit, ok := intQuery(c, "limit")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
items, err := client.ListTags(ctx, currentUserID(c), limit)
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, gin.H{"items": items}))
|
||||
}
|
||||
|
||||
func (h *Handler) CreatePost(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
var body createPostBody
|
||||
if err := c.ShouldBindJSON(&body); err != nil {
|
||||
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
post, err := client.CreatePost(ctx, contracts.CreateForumPostRequest{
|
||||
ActorUserID: currentUserID(c),
|
||||
TaskClassID: body.TaskClassID,
|
||||
Title: body.Title,
|
||||
Summary: body.Summary,
|
||||
Tags: append([]string(nil), body.Tags...),
|
||||
IdempotencyKey: strings.TrimSpace(c.GetHeader("X-Idempotency-Key")),
|
||||
})
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, post))
|
||||
}
|
||||
|
||||
func (h *Handler) GetPost(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
postID, ok := uint64Param(c, "post_id")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
detail, err := client.GetPost(ctx, currentUserID(c), postID)
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, detail))
|
||||
}
|
||||
|
||||
func (h *Handler) LikePost(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
postID, ok := uint64Param(c, "post_id")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
counters, state, err := client.LikePost(ctx, currentUserID(c), postID)
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, interactionEnvelope{
|
||||
PostID: postID,
|
||||
Liked: state.Liked,
|
||||
LikeCount: counters.LikeCount,
|
||||
RewardHint: &rewardHint{
|
||||
Receiver: "author",
|
||||
Status: rewardHintStatusActive,
|
||||
Amount: forumLikeRewardAmount,
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
func (h *Handler) UnlikePost(c *gin.Context) {
|
||||
client, ok := h.ready(c)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
postID, ok := uint64Param(c, "post_id")
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
|
||||
defer cancel()
|
||||
|
||||
counters, state, err := client.UnlikePost(ctx, currentUserID(c), postID)
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, interactionEnvelope{
|
||||
PostID: postID,
|
||||
Liked: state.Liked,
|
||||
LikeCount: counters.LikeCount,
|
||||
}))
|
||||
}
|
||||
|
||||
// ListComments returns a page of comments for the post named by the
// :post_id path parameter. page/page_size/sort come from the query
// string; empty page params default to 0 and are interpreted downstream.
func (h *Handler) ListComments(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}
	postID, ok := uint64Param(c, "post_id")
	if !ok {
		return
	}
	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	pageValue, ok := intQuery(c, "page")
	if !ok {
		return
	}
	pageSize, ok := intQuery(c, "page_size")
	if !ok {
		return
	}
	items, page, err := client.ListComments(ctx, currentUserID(c), postID, pageValue, pageSize, c.Query("sort"))
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, newPageEnvelope(items, page)))
}
|
||||
|
||||
// CreateComment creates a comment (optionally a reply, via
// parent_comment_id in the body) on the post named by :post_id.
// The X-Idempotency-Key header is forwarded so the backend can
// deduplicate retried submissions.
func (h *Handler) CreateComment(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}
	postID, ok := uint64Param(c, "post_id")
	if !ok {
		return
	}
	var body createCommentBody
	if err := c.ShouldBindJSON(&body); err != nil {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()
	comment, err := client.CreateComment(ctx, contracts.CreateForumCommentRequest{
		ActorUserID:     currentUserID(c),
		PostID:          postID,
		Content:         body.Content,
		ParentCommentID: body.ParentCommentID,
		IdempotencyKey:  strings.TrimSpace(c.GetHeader("X-Idempotency-Key")),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, comment))
}
|
||||
|
||||
// DeleteComment deletes the comment named by the :comment_id path
// parameter on behalf of the current user and echoes the resulting
// status/content/timestamp so the frontend can render the tombstone.
func (h *Handler) DeleteComment(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}
	commentID, ok := uint64Param(c, "comment_id")
	if !ok {
		return
	}
	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	result, err := client.DeleteComment(ctx, currentUserID(c), commentID)
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, deleteCommentEnvelope{
		CommentID: result.CommentID,
		Status:    result.Status,
		Content:   result.Content,
		DeletedAt: result.DeletedAt,
	}))
}
|
||||
|
||||
// ImportPost imports the forum post named by :post_id into a new task
// class owned by the current user, returning the new task class info,
// an author reward hint, and a next-action pointer for the frontend.
func (h *Handler) ImportPost(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}
	postID, ok := uint64Param(c, "post_id")
	if !ok {
		return
	}
	var body importPostBody
	// io.EOF is tolerated deliberately: the request body is optional and
	// an empty body means "use the default target title".
	if err := c.ShouldBindJSON(&body); err != nil && !errors.Is(err, io.EOF) {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()
	result, err := client.ImportPost(ctx, contracts.ImportForumPostRequest{
		ActorUserID:    currentUserID(c),
		PostID:         postID,
		TargetTitle:    body.TargetTitle,
		IdempotencyKey: strings.TrimSpace(c.GetHeader("X-Idempotency-Key")),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, importEnvelope{
		ImportID:       result.ImportID,
		PostID:         result.PostID,
		NewTaskClassID: result.NewTaskClassID,
		TaskClassTitle: result.TaskClassTitle,
		ImportCount:    result.ImportCount,
		RewardHint: rewardHint{
			Receiver: "author",
			Status:   rewardHintStatusActive,
			Amount:   forumImportRewardAmount,
		},
		NextAction: nextAction{
			Type:        "open_task_class",
			TaskClassID: result.NewTaskClassID,
		},
		CreatedAt: result.CreatedAt,
	}))
}
|
||||
|
||||
// ready returns the forum client, or reports a 500 and false when the
// handler (or its client) was never initialized. Handlers call it first
// so a misconfigured gateway fails fast instead of panicking.
func (h *Handler) ready(c *gin.Context) (ForumClient, bool) {
	if h == nil || h.client == nil {
		c.JSON(http.StatusInternalServerError, respond.InternalError(errors.New("计划广场 gateway client 未初始化")))
		return nil, false
	}
	return h.client, true
}
|
||||
|
||||
// currentUserID reads the authenticated user id placed in the gin
// context by the auth middleware; 0 means anonymous.
//
// NOTE(review): c.GetInt returns 0 when the stored value is not an int.
// optionalJWTTokenAuth stores resp.UserID directly — confirm its static
// type is int, otherwise authenticated users would read as anonymous.
func currentUserID(c *gin.Context) uint64 {
	userID := c.GetInt("user_id")
	if userID <= 0 {
		return 0
	}
	return uint64(userID)
}
|
||||
|
||||
// newPageEnvelope wraps a page of items plus the backend's paging
// metadata into the JSON envelope shape the frontend expects.
func newPageEnvelope[T any](items []T, page contracts.PageResult) pageEnvelope[T] {
	return pageEnvelope[T]{
		Items:    items,
		Page:     page.Page,
		PageSize: page.PageSize,
		Total:    page.Total,
		HasMore:  page.HasMore,
	}
}
|
||||
|
||||
// intQuery parses an optional integer query parameter. A missing/blank
// value yields (0, true); a non-numeric value writes a 400 response and
// yields (0, false), signalling the caller to stop.
func intQuery(c *gin.Context, key string) (int, bool) {
	raw := strings.TrimSpace(c.Query(key))
	if raw == "" {
		return 0, true
	}
	value, err := strconv.Atoi(raw)
	if err != nil {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return 0, false
	}
	return value, true
}
|
||||
|
||||
// uint64Param parses a required positive-uint64 path parameter. Zero and
// unparsable values write a 400 response and yield (0, false).
func uint64Param(c *gin.Context, key string) (uint64, bool) {
	value, err := strconv.ParseUint(strings.TrimSpace(c.Param(key)), 10, 64)
	if err != nil || value == 0 {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return 0, false
	}
	return value, true
}
|
||||
87
backend/gateway/api/forumapi/routes.go
Normal file
87
backend/gateway/api/forumapi/routes.go
Normal file
@@ -0,0 +1,87 @@
|
||||
package forumapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// RegisterRoutes mounts the plan-square (forum) HTTP entry points on the
// gateway router group.
//
// Responsibilities:
//  1. Only registers edge routes under /plan-square; forum business rules
//     stay in the backend service.
//  2. Public read endpoints allow anonymous access; when a token is sent,
//     viewer state is enriched via optionalJWTTokenAuth.
//  3. Write endpoints require login, and create-style routes additionally
//     go through the existing Redis idempotency middleware to guard
//     against duplicate submissions.
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *Handler, authClient ports.AccessTokenValidator, cache *dao.CacheDAO, limiter *ratelimit.RateLimiter) {
	if apiGroup == nil || handler == nil {
		return
	}

	planSquare := apiGroup.Group("/plan-square")
	{
		// Read side: anonymous allowed; rate-limit args (40, 1) — see
		// RateLimitMiddleware for their exact semantics.
		publicGroup := planSquare.Group("")
		publicGroup.Use(optionalJWTTokenAuth(authClient), rootmiddleware.RateLimitMiddleware(limiter, 40, 1))
		publicGroup.GET("/posts", handler.ListPosts)
		publicGroup.GET("/tags", handler.ListTags)
		publicGroup.GET("/posts/:post_id", handler.GetPost)
		publicGroup.GET("/posts/:post_id/comments", handler.ListComments)

		// Write side: JWT required, tighter rate limit.
		writeGroup := planSquare.Group("")
		writeGroup.Use(gatewaymiddleware.JWTTokenAuth(authClient), rootmiddleware.RateLimitMiddleware(limiter, 20, 1))
		writeGroup.POST("/posts", rootmiddleware.IdempotencyMiddleware(cache), handler.CreatePost)
		writeGroup.POST("/posts/:post_id/like", handler.LikePost)
		writeGroup.DELETE("/posts/:post_id/like", handler.UnlikePost)
		writeGroup.POST("/posts/:post_id/comments", rootmiddleware.IdempotencyMiddleware(cache), handler.CreateComment)
		writeGroup.DELETE("/comments/:comment_id", handler.DeleteComment)
		writeGroup.POST("/posts/:post_id/import", rootmiddleware.IdempotencyMiddleware(cache), handler.ImportPost)
	}
}
|
||||
|
||||
// optionalJWTTokenAuth gives the public plan-square read endpoints a
// "login-enhanced" mode.
//
// Steps:
//  1. No Authorization header: pass through so anonymous users can browse.
//  2. Authorization present: validate it against user/auth and put the
//     user_id into the context.
//  3. Invalid token: fail exactly like mandatory auth, so the frontend
//     never believes an already-logged-in state is usable with a bad token.
func optionalJWTTokenAuth(validator ports.AccessTokenValidator) gin.HandlerFunc {
	return func(c *gin.Context) {
		tokenString := gatewaymiddleware.ExtractTokenFromAuthorization(c.GetHeader("Authorization"))
		if tokenString == "" {
			c.Next()
			return
		}
		if validator == nil {
			c.JSON(http.StatusInternalServerError, respond.InternalError(errors.New("计划广场可选鉴权依赖未初始化")))
			c.Abort()
			return
		}

		// Token validation gets its own short 2s deadline, separate from
		// the downstream handler's request timeout.
		ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
		defer cancel()

		resp, err := validator.ValidateAccessToken(ctx, tokenString)
		if err != nil {
			respond.DealWithError(c, err)
			c.Abort()
			return
		}
		if resp == nil || !resp.Valid || resp.UserID <= 0 {
			c.JSON(http.StatusUnauthorized, respond.InvalidClaims)
			c.Abort()
			return
		}

		c.Set("user_id", resp.UserID)
		c.Set("claims", resp)
		c.Next()
	}
}
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
memorycontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/memory"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/notification"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
schedulecontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/schedule"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
taskclasscontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/taskclass"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
taskcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/task"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
|
||||
615
backend/gateway/api/tokenstoreapi/handler.go
Normal file
615
backend/gateway/api/tokenstoreapi/handler.go
Normal file
@@ -0,0 +1,615 @@
|
||||
package tokenstoreapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
creditcontracts "github.com/LoveLosita/smartflow/backend/shared/contracts/creditstore"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
const requestTimeout = 5 * time.Second
|
||||
|
||||
const (
|
||||
creditConsumptionPeriod24h = "24h"
|
||||
creditConsumptionPeriod7d = "7d"
|
||||
creditConsumptionPeriod30d = "30d"
|
||||
creditConsumptionPeriodAll = "all"
|
||||
creditDashboardPageSize = 50
|
||||
)
|
||||
|
||||
// TokenStoreClient 是商店页 credit-store 语义所需的最小依赖面。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只暴露 Credit 商店和流水所需能力,不再承接旧 token 商店接口。
|
||||
// 2. 所有方法都以“当前登录用户”口径访问,不开放跨用户查询。
|
||||
// 3. 具体 DB、RPC、Redis 细节统一封装在 tokenstore client 内。
|
||||
type TokenStoreClient interface {
|
||||
GetCreditBalanceSnapshot(ctx context.Context, userID uint64) (*creditcontracts.CreditBalanceSnapshot, error)
|
||||
GetCreditConsumptionDashboard(ctx context.Context, req creditcontracts.GetCreditConsumptionDashboardRequest) (*creditcontracts.CreditConsumptionDashboardView, error)
|
||||
ListCreditProducts(ctx context.Context, actorUserID uint64) ([]creditcontracts.CreditProductView, error)
|
||||
CreateCreditOrder(ctx context.Context, req creditcontracts.CreateCreditOrderRequest) (*creditcontracts.CreditOrderView, error)
|
||||
ListCreditOrders(ctx context.Context, req creditcontracts.ListCreditOrdersRequest) ([]creditcontracts.CreditOrderView, creditcontracts.PageResult, error)
|
||||
GetCreditOrder(ctx context.Context, actorUserID uint64, orderID uint64) (*creditcontracts.CreditOrderView, error)
|
||||
MockPaidCreditOrder(ctx context.Context, req creditcontracts.MockPaidCreditOrderRequest) (*creditcontracts.CreditOrderView, error)
|
||||
ListCreditTransactions(ctx context.Context, req creditcontracts.ListCreditTransactionsRequest) ([]creditcontracts.CreditTransactionView, creditcontracts.PageResult, error)
|
||||
}
|
||||
|
||||
type Handler struct {
|
||||
client TokenStoreClient
|
||||
}
|
||||
|
||||
func NewHandler(client TokenStoreClient) *Handler {
|
||||
return &Handler{client: client}
|
||||
}
|
||||
|
||||
type pageEnvelope[T any] struct {
|
||||
Items []T `json:"items"`
|
||||
Page int `json:"page"`
|
||||
PageSize int `json:"page_size"`
|
||||
Total int `json:"total"`
|
||||
HasMore bool `json:"has_more"`
|
||||
}
|
||||
|
||||
type creditSummaryEnvelope struct {
|
||||
CurrentCreditTotal int64 `json:"current_credit_total"`
|
||||
RecordedCreditTotal int64 `json:"recorded_credit_total"`
|
||||
AppliedCreditTotal int64 `json:"applied_credit_total"`
|
||||
PendingApplyCreditTotal int64 `json:"pending_apply_credit_total"`
|
||||
ValidUntil *string `json:"valid_until"`
|
||||
QuotaSyncStatus string `json:"quota_sync_status"`
|
||||
Tip string `json:"tip"`
|
||||
}
|
||||
|
||||
type paymentAction struct {
|
||||
Type string `json:"type"`
|
||||
Label string `json:"label"`
|
||||
}
|
||||
|
||||
type creditOrderEnvelope struct {
|
||||
OrderID uint64 `json:"order_id"`
|
||||
OrderNo string `json:"order_no"`
|
||||
Status string `json:"status"`
|
||||
Quantity int `json:"quantity"`
|
||||
CreditAmount int64 `json:"credit_amount"`
|
||||
AmountCent int64 `json:"amount_cent"`
|
||||
PriceText string `json:"price_text"`
|
||||
Currency string `json:"currency"`
|
||||
PaymentMode string `json:"payment_mode"`
|
||||
ProductName string `json:"product_name"`
|
||||
ProductDetail map[string]any `json:"product_snapshot,omitempty"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
PaidAt *string `json:"paid_at"`
|
||||
CreditedAt *string `json:"credited_at"`
|
||||
PaymentAction paymentAction `json:"payment_action"`
|
||||
}
|
||||
|
||||
type creditTransactionEnvelope struct {
|
||||
GrantID uint64 `json:"grant_id"`
|
||||
SourceLabel string `json:"source_label"`
|
||||
Amount int64 `json:"amount"`
|
||||
Status string `json:"status"`
|
||||
Description string `json:"description"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
Direction string `json:"direction"`
|
||||
BalanceAfter int64 `json:"balance_after"`
|
||||
EventID string `json:"event_id"`
|
||||
OrderID *uint64 `json:"order_id"`
|
||||
}
|
||||
|
||||
type createOrderBody struct {
|
||||
ProductID uint64 `json:"product_id"`
|
||||
Quantity int `json:"quantity"`
|
||||
}
|
||||
|
||||
type mockPaidBody struct {
|
||||
MockChannel string `json:"mock_channel"`
|
||||
}
|
||||
|
||||
// GetSummary returns the current user's credit wallet summary, built
// from the authoritative balance snapshot.
func (h *Handler) GetSummary(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	snapshot, err := client.GetCreditBalanceSnapshot(ctx, currentUserID(c))
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, buildCreditSummaryEnvelope(snapshot)))
}
|
||||
|
||||
// GetConsumptionDashboard returns the current user's credit-consumption
// dashboard for the requested period. The raw ?period value is passed
// through; validation happens in the client/backend.
func (h *Handler) GetConsumptionDashboard(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	dashboard, err := client.GetCreditConsumptionDashboard(ctx, creditcontracts.GetCreditConsumptionDashboardRequest{
		ActorUserID: currentUserID(c),
		Period:      c.Query("period"),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, dashboard))
}
|
||||
|
||||
// ListProducts returns the credit products visible to the current user,
// wrapped in an {"items": ...} object.
func (h *Handler) ListProducts(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	items, err := client.ListCreditProducts(ctx, currentUserID(c))
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, gin.H{"items": items}))
}
|
||||
|
||||
// CreateOrder creates a credit purchase order for the current user.
// The X-Idempotency-Key header is forwarded so retried submissions do
// not create duplicate orders.
func (h *Handler) CreateOrder(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	var body createOrderBody
	if err := c.ShouldBindJSON(&body); err != nil {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	order, err := client.CreateCreditOrder(ctx, creditcontracts.CreateCreditOrderRequest{
		ActorUserID:    currentUserID(c),
		ProductID:      body.ProductID,
		Quantity:       body.Quantity,
		IdempotencyKey: strings.TrimSpace(c.GetHeader("X-Idempotency-Key")),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, buildCreditOrderEnvelope(order)))
}
|
||||
|
||||
// ListOrders returns a page of the current user's credit orders,
// optionally filtered by ?status, as display envelopes.
func (h *Handler) ListOrders(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	pageValue, ok := intQuery(c, "page")
	if !ok {
		return
	}
	pageSize, ok := intQuery(c, "page_size")
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	items, page, err := client.ListCreditOrders(ctx, creditcontracts.ListCreditOrdersRequest{
		ActorUserID: currentUserID(c),
		Page:        pageValue,
		PageSize:    pageSize,
		Status:      c.Query("status"),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}

	result := make([]creditOrderEnvelope, 0, len(items))
	for i := range items {
		// Copy to a local so the address passed below is stable.
		item := items[i]
		result = append(result, buildCreditOrderEnvelope(&item))
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, newPageEnvelope(result, page)))
}
|
||||
|
||||
// GetOrder returns the detail envelope of one credit order owned by the
// current user, identified by the :order_id path parameter.
func (h *Handler) GetOrder(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	orderID, ok := uint64Param(c, "order_id")
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	order, err := client.GetCreditOrder(ctx, currentUserID(c), orderID)
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, buildCreditOrderEnvelope(order)))
}
|
||||
|
||||
// MockPaidOrder marks the order named by :order_id as paid through the
// mock payment channel. mock_channel defaults to "mock" when absent,
// and the X-Idempotency-Key header guards duplicate confirmations.
func (h *Handler) MockPaidOrder(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	orderID, ok := uint64Param(c, "order_id")
	if !ok {
		return
	}

	var body mockPaidBody
	if err := c.ShouldBindJSON(&body); err != nil {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	order, err := client.MockPaidCreditOrder(ctx, creditcontracts.MockPaidCreditOrderRequest{
		ActorUserID:    currentUserID(c),
		OrderID:        orderID,
		MockChannel:    firstNonEmptyString(strings.TrimSpace(body.MockChannel), "mock"),
		IdempotencyKey: strings.TrimSpace(c.GetHeader("X-Idempotency-Key")),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, buildCreditOrderEnvelope(order)))
}
|
||||
|
||||
// ListTransactions returns a page of the current user's credit ledger
// entries, optionally filtered by ?source and ?direction.
func (h *Handler) ListTransactions(c *gin.Context) {
	client, ok := h.ready(c)
	if !ok {
		return
	}

	pageValue, ok := intQuery(c, "page")
	if !ok {
		return
	}
	pageSize, ok := intQuery(c, "page_size")
	if !ok {
		return
	}

	ctx, cancel := context.WithTimeout(c.Request.Context(), requestTimeout)
	defer cancel()

	items, page, err := client.ListCreditTransactions(ctx, creditcontracts.ListCreditTransactionsRequest{
		ActorUserID: currentUserID(c),
		Page:        pageValue,
		PageSize:    pageSize,
		Source:      c.Query("source"),
		Direction:   c.Query("direction"),
	})
	if err != nil {
		respond.DealWithError(c, err)
		return
	}

	result := make([]creditTransactionEnvelope, 0, len(items))
	for i := range items {
		result = append(result, buildCreditTransactionEnvelope(items[i]))
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, newPageEnvelope(result, page)))
}
|
||||
|
||||
// ready returns the credit-store client, or reports a 500 and false when
// the handler (or its client) was never initialized.
func (h *Handler) ready(c *gin.Context) (TokenStoreClient, bool) {
	if h == nil || h.client == nil {
		c.JSON(http.StatusInternalServerError, respond.InternalError(errors.New("credit-store gateway client 未初始化")))
		return nil, false
	}
	return h.client, true
}
|
||||
|
||||
// currentUserID reads the authenticated user id placed in the gin
// context by the auth middleware; 0 means anonymous.
//
// NOTE(review): c.GetInt returns 0 when the stored value is not an int —
// confirm the auth middleware stores user_id as int, otherwise logged-in
// users would be treated as anonymous here.
func currentUserID(c *gin.Context) uint64 {
	userID := c.GetInt("user_id")
	if userID <= 0 {
		return 0
	}
	return uint64(userID)
}
|
||||
|
||||
// buildCreditSummaryEnvelope converts the authoritative balance snapshot
// into the wallet-summary response value.
//
// Responsibilities:
//  1. current_credit_total means "credits currently usable" and mirrors
//     the authoritative balance directly.
//  2. Legacy fields stay populated so existing frontend/compat logic is
//     unaffected.
//  3. This is presentation-layer assembly only — the gateway never
//     re-runs charging or ledger math.
func buildCreditSummaryEnvelope(snapshot *creditcontracts.CreditBalanceSnapshot) creditSummaryEnvelope {
	// Nil snapshot: emit an all-zero, "synced" summary rather than failing.
	if snapshot == nil {
		return creditSummaryEnvelope{
			CurrentCreditTotal:      0,
			RecordedCreditTotal:     0,
			AppliedCreditTotal:      0,
			PendingApplyCreditTotal: 0,
			ValidUntil:              nil,
			QuotaSyncStatus:         "synced",
			Tip:                     "当前为 Credit 权威账本,购买、奖励和 AI 消费都会实时入账。",
		}
	}

	// Recorded total = recharges + rewards; when those are missing but a
	// positive balance exists, reconstruct it as balance + consumed.
	recordedTotal := snapshot.TotalRecharged + snapshot.TotalRewarded
	if recordedTotal <= 0 && snapshot.Balance > 0 {
		recordedTotal = snapshot.Balance + snapshot.TotalConsumed
	}

	tip := "当前为 Credit 权威账本,购买、奖励和 AI 消费都会实时入账。"
	if snapshot.IsBlocked {
		tip = "当前 Credit 余额不足,AI 调用会被阻断;充值后会自动恢复。"
	}

	return creditSummaryEnvelope{
		CurrentCreditTotal:      snapshot.Balance,
		RecordedCreditTotal:     maxInt64(recordedTotal, 0),
		AppliedCreditTotal:      snapshot.Balance,
		PendingApplyCreditTotal: 0,
		ValidUntil:              nil,
		QuotaSyncStatus:         "synced",
		Tip:                     tip,
	}
}
|
||||
|
||||
// buildCreditOrderEnvelope maps a backend order view onto the display
// envelope. A nil order yields a zero envelope that still carries the
// default mock-payment action so the frontend has something to render.
func buildCreditOrderEnvelope(order *creditcontracts.CreditOrderView) creditOrderEnvelope {
	if order == nil {
		return creditOrderEnvelope{
			PaymentAction: paymentAction{
				Type:  "mock_paid",
				Label: "确认支付",
			},
		}
	}

	return creditOrderEnvelope{
		OrderID:       order.OrderID,
		OrderNo:       order.OrderNo,
		Status:        order.Status,
		Quantity:      order.Quantity,
		CreditAmount:  order.CreditAmount,
		AmountCent:    order.AmountCent,
		PriceText:     order.PriceText,
		Currency:      order.Currency,
		PaymentMode:   order.PaymentMode,
		ProductName:   order.ProductName,
		ProductDetail: parseJSONMap(order.ProductSnapshot),
		CreatedAt:     order.CreatedAt,
		PaidAt:        order.PaidAt,
		CreditedAt:    order.CreditedAt,
		PaymentAction: paymentAction{
			Type:  "mock_paid",
			Label: "确认支付",
		},
	}
}
|
||||
|
||||
// buildCreditTransactionEnvelope maps one ledger entry onto its display
// envelope; a blank description falls back to the source label.
func buildCreditTransactionEnvelope(item creditcontracts.CreditTransactionView) creditTransactionEnvelope {
	return creditTransactionEnvelope{
		GrantID:      item.TransactionID,
		SourceLabel:  item.SourceLabel,
		Amount:       item.Amount,
		Status:       item.Status,
		Description:  firstNonEmptyString(item.Description, item.SourceLabel),
		CreatedAt:    item.CreatedAt,
		Direction:    item.Direction,
		BalanceAfter: item.BalanceAfter,
		EventID:      item.EventID,
		OrderID:      item.OrderID,
	}
}
|
||||
|
||||
// parseJSONMap decodes a JSON object string into a map. Blank input and
// decode failures both yield nil so callers can omit the field safely.
func parseJSONMap(raw string) map[string]any {
	payload := strings.TrimSpace(raw)
	if payload == "" {
		return nil
	}

	// Decode into a pre-made map so a JSON "null" still yields an empty,
	// non-nil map (matching the original behavior).
	decoded := make(map[string]any)
	if unmarshalErr := json.Unmarshal([]byte(payload), &decoded); unmarshalErr != nil {
		return nil
	}
	return decoded
}
|
||||
|
||||
// newPageEnvelope wraps a page of items plus the backend's paging
// metadata into the JSON envelope shape the frontend expects.
func newPageEnvelope[T any](items []T, page creditcontracts.PageResult) pageEnvelope[T] {
	return pageEnvelope[T]{
		Items:    items,
		Page:     page.Page,
		PageSize: page.PageSize,
		Total:    page.Total,
		HasMore:  page.HasMore,
	}
}
|
||||
|
||||
// intQuery parses an optional integer query parameter. A missing/blank
// value yields (0, true); a non-numeric value writes a 400 response and
// yields (0, false), signalling the caller to stop.
func intQuery(c *gin.Context, key string) (int, bool) {
	raw := strings.TrimSpace(c.Query(key))
	if raw == "" {
		return 0, true
	}

	value, err := strconv.Atoi(raw)
	if err != nil {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return 0, false
	}
	return value, true
}
|
||||
|
||||
// uint64Param parses a required positive-uint64 path parameter. Zero and
// unparsable values write a 400 response and yield (0, false).
func uint64Param(c *gin.Context, key string) (uint64, bool) {
	value, err := strconv.ParseUint(strings.TrimSpace(c.Param(key)), 10, 64)
	if err != nil || value == 0 {
		c.JSON(http.StatusBadRequest, respond.WrongParamType)
		return 0, false
	}
	return value, true
}
|
||||
|
||||
// firstNonEmptyString returns the trimmed form of the first argument
// whose trimmed form is non-empty, or "" when every candidate is blank.
func firstNonEmptyString(values ...string) string {
	for _, candidate := range values {
		trimmed := strings.TrimSpace(candidate)
		if trimmed == "" {
			continue
		}
		return trimmed
	}
	return ""
}
|
||||
|
||||
// maxInt64 reports the larger of its two int64 arguments.
func maxInt64(left int64, right int64) int64 {
	if right > left {
		return right
	}
	return left
}
|
||||
|
||||
func normalizeCreditConsumptionPeriod(raw string) (string, error) {
|
||||
switch strings.TrimSpace(raw) {
|
||||
case "", creditConsumptionPeriod24h:
|
||||
return creditConsumptionPeriod24h, nil
|
||||
case creditConsumptionPeriod7d:
|
||||
return creditConsumptionPeriod7d, nil
|
||||
case creditConsumptionPeriod30d:
|
||||
return creditConsumptionPeriod30d, nil
|
||||
case creditConsumptionPeriodAll:
|
||||
return creditConsumptionPeriodAll, nil
|
||||
default:
|
||||
return "", errors.New("invalid consumption period")
|
||||
}
|
||||
}
|
||||
|
||||
// buildCreditConsumptionDashboard aggregates the user's "charge" expense
// transactions into a consumption dashboard for the given period.
//
// It pages through the ledger (newest first, presumably — the early-stop
// below relies on that ordering; TODO confirm against the client) and
// sums credit and token consumption for entries inside the time window.
func buildCreditConsumptionDashboard(ctx context.Context, client TokenStoreClient, actorUserID uint64, period string) (creditcontracts.CreditConsumptionDashboardView, error) {
	// hasWindow is false for the "all" period (and unknown values), in
	// which case every entry counts regardless of timestamp.
	startAt, hasWindow := resolveCreditConsumptionWindow(period, time.Now())
	dashboard := creditcontracts.CreditConsumptionDashboardView{
		Period: period,
	}

	for page := 1; ; page++ {
		items, pageResult, err := client.ListCreditTransactions(ctx, creditcontracts.ListCreditTransactionsRequest{
			ActorUserID: actorUserID,
			Page:        page,
			PageSize:    creditDashboardPageSize,
			Source:      "charge",
			Direction:   "expense",
		})
		if err != nil {
			return creditcontracts.CreditConsumptionDashboardView{}, err
		}
		if len(items) == 0 {
			return dashboard, nil
		}

		for _, item := range items {
			// Failed charges never count toward consumption.
			if strings.EqualFold(strings.TrimSpace(item.Status), "failed") {
				continue
			}

			createdAt, ok := parseCreditTransactionCreatedAt(item.CreatedAt)
			// Inside a bounded window, skip entries older than the window
			// start, and skip entries whose timestamp cannot be parsed.
			if hasWindow && ok && createdAt.Before(startAt) {
				continue
			}
			if hasWindow && !ok {
				continue
			}

			dashboard.CreditConsumed += normalizeCreditConsumedAmount(item.Amount)
			dashboard.TokenConsumed += extractChargeTokenConsumed(item.MetadataJSON)
		}

		if !pageResult.HasMore {
			return dashboard, nil
		}
		// Early stop: once the oldest entry of a page predates the window,
		// later pages can only be older still.
		if hasWindow && isCreditTransactionPageBeforeWindow(items, startAt) {
			return dashboard, nil
		}
	}
}
|
||||
|
||||
func resolveCreditConsumptionWindow(period string, now time.Time) (time.Time, bool) {
|
||||
switch strings.TrimSpace(period) {
|
||||
case creditConsumptionPeriod24h:
|
||||
return now.Add(-24 * time.Hour), true
|
||||
case creditConsumptionPeriod7d:
|
||||
return now.Add(-7 * 24 * time.Hour), true
|
||||
case creditConsumptionPeriod30d:
|
||||
return now.Add(-30 * 24 * time.Hour), true
|
||||
default:
|
||||
return time.Time{}, false
|
||||
}
|
||||
}
|
||||
|
||||
func parseCreditTransactionCreatedAt(raw string) (time.Time, bool) {
|
||||
parsed, err := time.Parse(time.RFC3339, strings.TrimSpace(raw))
|
||||
if err != nil {
|
||||
return time.Time{}, false
|
||||
}
|
||||
return parsed, true
|
||||
}
|
||||
|
||||
func isCreditTransactionPageBeforeWindow(items []creditcontracts.CreditTransactionView, startAt time.Time) bool {
|
||||
if len(items) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
oldest, ok := parseCreditTransactionCreatedAt(items[len(items)-1].CreatedAt)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return oldest.Before(startAt)
|
||||
}
|
||||
|
||||
// normalizeCreditConsumedAmount turns a signed ledger amount into a
// non-negative consumption figure: negative (expense) amounts become
// their magnitude, everything else counts as zero consumption.
func normalizeCreditConsumedAmount(amount int64) int64 {
	if amount < 0 {
		return -amount
	}
	return 0
}
}
|
||||
|
||||
// extractChargeTokenConsumed pulls the token usage out of a charge
// entry's metadata JSON. Preference order: total_tokens when positive,
// otherwise input_tokens + output_tokens. Blank or malformed metadata,
// and negative sums, all yield 0.
func extractChargeTokenConsumed(metadataJSON string) int64 {
	if strings.TrimSpace(metadataJSON) == "" {
		return 0
	}

	var usage struct {
		InputTokens  int64 `json:"input_tokens"`
		OutputTokens int64 `json:"output_tokens"`
		TotalTokens  int64 `json:"total_tokens"`
	}
	if json.Unmarshal([]byte(metadataJSON), &usage) != nil {
		return 0
	}

	switch {
	case usage.TotalTokens > 0:
		return usage.TotalTokens
	case usage.InputTokens+usage.OutputTokens > 0:
		return usage.InputTokens + usage.OutputTokens
	default:
		return 0
	}
}
|
||||
35
backend/gateway/api/tokenstoreapi/routes.go
Normal file
35
backend/gateway/api/tokenstoreapi/routes.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package tokenstoreapi
|
||||
|
||||
import (
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// RegisterRoutes mounts the credit-store HTTP entry points on the
// gateway router group.
//
// Responsibilities:
//  1. Only registers edge routes under /credit-store; order and ledger
//     implementation details stay in the backend.
//  2. Every P0 endpoint requires login and goes through the shared
//     rate-limit middleware.
//  3. Only order creation and mock-paid need an idempotency key, to
//     prevent duplicate orders or duplicate payment confirmations.
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *Handler, authClient ports.AccessTokenValidator, cache *dao.CacheDAO, limiter *ratelimit.RateLimiter) {
	if apiGroup == nil || handler == nil {
		return
	}

	tokenStoreGroup := apiGroup.Group("/credit-store")
	tokenStoreGroup.Use(gatewaymiddleware.JWTTokenAuth(authClient), rootmiddleware.RateLimitMiddleware(limiter, 20, 1))
	{
		tokenStoreGroup.GET("/summary", handler.GetSummary)
		tokenStoreGroup.GET("/consumption-dashboard", handler.GetConsumptionDashboard)
		tokenStoreGroup.GET("/products", handler.ListProducts)
		tokenStoreGroup.POST("/orders", rootmiddleware.IdempotencyMiddleware(cache), handler.CreateOrder)
		tokenStoreGroup.GET("/orders", handler.ListOrders)
		tokenStoreGroup.GET("/orders/:order_id", handler.GetOrder)
		tokenStoreGroup.POST("/orders/:order_id/mock-paid", rootmiddleware.IdempotencyMiddleware(cache), handler.MockPaidOrder)
		tokenStoreGroup.GET("/transactions", handler.ListTransactions)
	}
}
|
||||
59
backend/gateway/api/userauth/dto.go
Normal file
59
backend/gateway/api/userauth/dto.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package userauthapi
|
||||
|
||||
import contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
|
||||
|
||||
// geeTestValidatePayload carries only the human-verification fields at the
// gateway boundary.
// Responsibilities:
//  1. Receives the geetest triple submitted by the frontend;
//  2. Does not map user/auth RPC parameters, keeping third-party captcha
//     fields out of internal service contracts;
//  3. Performs no validation itself; real validation is done by
//     GeeTestService.
type geeTestValidatePayload struct {
	Challenge string `json:"geetest_challenge"`
	Validate  string `json:"geetest_validate"`
	Seccode   string `json:"geetest_seccode"`
}
|
||||
|
||||
// registerRequest is the gateway-facing registration payload: the business
// fields plus the embedded geetest captcha triple.
type registerRequest struct {
	Username    string `json:"username"`
	Password    string `json:"password"`
	PhoneNumber string `json:"phone_number"`

	geeTestValidatePayload
}

// toContract maps the business fields onto the internal user/auth contract,
// deliberately leaving the captcha fields out of it.
func (r registerRequest) toContract() contracts.RegisterRequest {
	return contracts.RegisterRequest{
		Username:    r.Username,
		Password:    r.Password,
		PhoneNumber: r.PhoneNumber,
	}
}

// captchaPayload exposes the embedded captcha triple for verification.
func (r registerRequest) captchaPayload() geeTestValidatePayload {
	return r.geeTestValidatePayload
}
|
||||
|
||||
// loginRequest is the gateway-facing login payload: the credentials plus the
// embedded geetest captcha triple.
type loginRequest struct {
	Username string `json:"username"`
	Password string `json:"password"`

	geeTestValidatePayload
}

// toContract maps the credentials onto the internal user/auth contract,
// deliberately leaving the captcha fields out of it.
func (r loginRequest) toContract() contracts.LoginRequest {
	return contracts.LoginRequest{
		Username: r.Username,
		Password: r.Password,
	}
}

// captchaPayload exposes the embedded captcha triple for verification.
func (r loginRequest) captchaPayload() geeTestValidatePayload {
	return r.geeTestValidatePayload
}
|
||||
|
||||
// captchaRegisterResponse mirrors the structure the frontend's initGeetest
// call consumes (GeeTest v3 API1 initialization response shape).
type captchaRegisterResponse struct {
	Success    int    `json:"success"`
	GT         string `json:"gt"`
	Challenge  string `json:"challenge"`
	NewCaptcha bool   `json:"new_captcha"`
}
|
||||
188
backend/gateway/api/userauth/geetest.go
Normal file
188
backend/gateway/api/userauth/geetest.go
Normal file
@@ -0,0 +1,188 @@
|
||||
package userauthapi
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
const (
	geeTestRegisterURL = "https://api.geetest.com/register.php"
	// Second-step validation must also go over HTTPS: the seccode is a
	// security credential and must not travel in plaintext.
	geeTestValidateURL = "https://api.geetest.com/validate.php"
	geeTestClientType  = "web"
	geeTestSDKName     = "smartflow-gateway-go/1.0"
)
|
||||
|
||||
// geeTestRegisterUpstreamResponse is the minimal slice of the API1 register
// response the gateway cares about.
type geeTestRegisterUpstreamResponse struct {
	Challenge string `json:"challenge"`
}

// geeTestValidateUpstreamResponse is the minimal slice of the API2 validate
// response the gateway cares about.
type geeTestValidateUpstreamResponse struct {
	Seccode string `json:"seccode"`
}
|
||||
|
||||
// GeeTestService encapsulates the gateway's minimal interaction with the
// GeeTest v3 API.
// Responsibilities:
//  1. Handles only captcha initialization and second-step validation; it
//     carries no login/registration business logic;
//  2. Exposes only the minimal methods the gateway HTTP layer needs,
//     keeping the third-party protocol out of the handlers;
//  3. Does no offline failback storage; the current scope is the online
//     validation loop only.
type GeeTestService struct {
	captchaID  string       // GeeTest public captcha id ("gt")
	privateKey string       // GeeTest private key used for signing
	httpClient *http.Client // short-timeout client for upstream calls
}
|
||||
|
||||
// NewGeeTestServiceFromConfig builds a GeeTestService from the viper keys
// geetest.captchaID and geetest.privateKey. Missing keys leave the service
// in an "unconfigured" state that fails fast on use (see isConfigured).
func NewGeeTestServiceFromConfig() *GeeTestService {
	return &GeeTestService{
		captchaID:  strings.TrimSpace(viper.GetString("geetest.captchaID")),
		privateKey: strings.TrimSpace(viper.GetString("geetest.privateKey")),
		// 3s cap keeps third-party latency from starving gateway handlers.
		httpClient: &http.Client{Timeout: 3 * time.Second},
	}
}
|
||||
|
||||
// Register 负责向极验申请当前页 challenge,并转成前端 `initGeetest` 可直接消费的结构。
|
||||
// 职责边界:
|
||||
// 1. 只对应官方 API1 初始化;
|
||||
// 2. 不负责缓存 challenge,也不负责表单业务字段;
|
||||
// 3. 若极验服务不可用,直接返回初始化失败,让前端走显式提示。
|
||||
func (s *GeeTestService) Register(ctx context.Context, clientIP string) (*captchaRegisterResponse, error) {
|
||||
if !s.isConfigured() {
|
||||
return nil, respond.CaptchaInitFailed
|
||||
}
|
||||
|
||||
// 1. 先按官方 API1 约定拉取原始 challenge。
|
||||
// 2. 再使用 privateKey 做一次签名混淆,避免把上游原始 challenge 直接暴露给前端。
|
||||
// 3. 任一步失败都直接中断,让登录/注册入口显式暴露初始化异常。
|
||||
query := url.Values{}
|
||||
query.Set("digestmod", "md5")
|
||||
query.Set("gt", s.captchaID)
|
||||
query.Set("json_format", "1")
|
||||
query.Set("sdk", geeTestSDKName)
|
||||
query.Set("client_type", geeTestClientType)
|
||||
if ip := strings.TrimSpace(clientIP); ip != "" {
|
||||
query.Set("ip_address", ip)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, geeTestRegisterURL+"?"+query.Encode(), nil)
|
||||
if err != nil {
|
||||
return nil, respond.CaptchaInitFailed
|
||||
}
|
||||
|
||||
resp, err := s.httpClient.Do(req)
|
||||
if err != nil {
|
||||
return nil, respond.CaptchaInitFailed
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
rawBody, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, respond.CaptchaInitFailed
|
||||
}
|
||||
|
||||
challenge := extractRegisterChallenge(rawBody)
|
||||
if challenge == "" {
|
||||
return nil, respond.CaptchaInitFailed
|
||||
}
|
||||
|
||||
return &captchaRegisterResponse{
|
||||
Success: 1,
|
||||
GT: s.captchaID,
|
||||
Challenge: md5Hex(challenge + s.privateKey),
|
||||
NewCaptcha: true,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Verify validates the geetest triple the frontend sent back.
// Responsibilities:
//  1. First performs a local challenge/validate consistency check to reject
//     invalid requests early;
//  2. Then calls the official API2 to verify the seccode, ensuring the
//     captcha result is genuine;
//  3. Returns only pass / fail / service-unavailable conclusions and mixes
//     in no login or registration business decisions.
func (s *GeeTestService) Verify(ctx context.Context, payload geeTestValidatePayload, clientIP string) error {
	if !s.isConfigured() {
		return respond.CaptchaVerifyUnavailable
	}

	challenge := strings.TrimSpace(payload.Challenge)
	validate := strings.TrimSpace(payload.Validate)
	seccode := strings.TrimSpace(payload.Seccode)
	if challenge == "" || validate == "" || seccode == "" {
		return respond.MissingParam
	}

	// 1. Check, per the GeeTest v3 protocol, that validate matches the
	//    challenge/privateKey signature.
	// 2. If even the local signature mismatches, fail immediately and skip
	//    the third-party call.
	// 3. Only after the local signature passes is API2 called to re-verify
	//    the seccode.
	expectedValidate := md5Hex(s.privateKey + "geetest" + challenge)
	if !strings.EqualFold(validate, expectedValidate) {
		return respond.CaptchaVerifyFailed
	}

	form := url.Values{}
	form.Set("captchaid", s.captchaID)
	form.Set("challenge", challenge)
	form.Set("seccode", seccode)
	form.Set("json_format", "1")
	form.Set("sdk", geeTestSDKName)
	form.Set("client_type", geeTestClientType)
	if ip := strings.TrimSpace(clientIP); ip != "" {
		form.Set("ip_address", ip)
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, geeTestValidateURL, strings.NewReader(form.Encode()))
	if err != nil {
		return respond.CaptchaVerifyUnavailable
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

	resp, err := s.httpClient.Do(req)
	if err != nil {
		return respond.CaptchaVerifyUnavailable
	}
	defer resp.Body.Close()

	rawBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return respond.CaptchaVerifyUnavailable
	}

	if !matchValidateSeccode(rawBody, seccode) {
		return respond.CaptchaVerifyFailed
	}
	return nil
}
|
||||
|
||||
// isConfigured reports whether both GeeTest credentials are present. A nil
// receiver counts as unconfigured so callers never need a nil check.
func (s *GeeTestService) isConfigured() bool {
	return s != nil && s.captchaID != "" && s.privateKey != ""
}
|
||||
|
||||
func extractRegisterChallenge(rawBody []byte) string {
|
||||
var payload geeTestRegisterUpstreamResponse
|
||||
if err := json.Unmarshal(rawBody, &payload); err == nil {
|
||||
return strings.TrimSpace(payload.Challenge)
|
||||
}
|
||||
return strings.TrimSpace(string(rawBody))
|
||||
}
|
||||
|
||||
func matchValidateSeccode(rawBody []byte, seccode string) bool {
|
||||
expected := md5Hex(strings.TrimSpace(seccode))
|
||||
|
||||
var payload geeTestValidateUpstreamResponse
|
||||
if err := json.Unmarshal(rawBody, &payload); err == nil {
|
||||
return strings.EqualFold(strings.TrimSpace(payload.Seccode), expected)
|
||||
}
|
||||
return strings.EqualFold(strings.TrimSpace(string(rawBody)), expected)
|
||||
}
|
||||
|
||||
// md5Hex returns the lowercase hex MD5 digest of input. MD5 is mandated by
// the GeeTest v3 signing protocol; it is not used for general security here.
func md5Hex(input string) string {
	digest := md5.Sum([]byte(input))
	return hex.EncodeToString(digest[:])
}
|
||||
@@ -7,32 +7,64 @@ import (
|
||||
"time"
|
||||
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type UserHandler struct {
|
||||
client ports.UserCommandClient
|
||||
client ports.UserCommandClient
|
||||
captcha *GeeTestService
|
||||
allowRegister bool
|
||||
}
|
||||
|
||||
// NewUserHandler 只接收 user/auth 客户端,不再直接依赖本地 user service。
|
||||
func NewUserHandler(client ports.UserCommandClient) *UserHandler {
|
||||
return &UserHandler{client: client}
|
||||
// NewUserHandler 只接收 user/auth 客户端与验证码服务,不再直接依赖本地 user service。
|
||||
func NewUserHandler(client ports.UserCommandClient, captcha *GeeTestService, allowRegister bool) *UserHandler {
|
||||
return &UserHandler{
|
||||
client: client,
|
||||
captcha: captcha,
|
||||
allowRegister: allowRegister,
|
||||
}
|
||||
}
|
||||
|
||||
// CaptchaRegister serves the GeeTest initialization payload that the login
// and registration pages feed into initGeetest.
func (api *UserHandler) CaptchaRegister(c *gin.Context) {
	// Give the third-party captcha call its own 3s budget, independent of
	// the 2s internal RPC timeout used elsewhere in this handler.
	captchaCtx, cancel := context.WithTimeout(c.Request.Context(), 3*time.Second)
	defer cancel()

	registerData, err := api.captcha.Register(captchaCtx, c.ClientIP())
	if err != nil {
		respond.DealWithError(c, err)
		return
	}
	c.JSON(http.StatusOK, respond.RespWithData(respond.Ok, registerData))
}
|
||||
|
||||
func (api *UserHandler) UserRegister(c *gin.Context) {
|
||||
var req contracts.RegisterRequest
|
||||
if !api.ensureRegisterEnabled(c) {
|
||||
return
|
||||
}
|
||||
|
||||
var req registerRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
||||
return
|
||||
}
|
||||
|
||||
// 1. 先用独立超时完成极验二次校验,避免第三方接口抖动侵入内部 RPC 超时预算。
|
||||
// 2. 只有验证码通过后才继续调 user/auth 注册服务,防止无效流量进入内部链路。
|
||||
// 3. 内部 RPC 仍保留原先 2 秒超时边界,不改变现有 user/auth 服务 SLA。
|
||||
captchaCtx, cancelCaptcha := context.WithTimeout(c.Request.Context(), 3*time.Second)
|
||||
defer cancelCaptcha()
|
||||
if err := api.captcha.Verify(captchaCtx, req.captchaPayload(), c.ClientIP()); err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
retUser, err := api.client.Register(ctx, req)
|
||||
retUser, err := api.client.Register(ctx, req.toContract())
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
@@ -41,16 +73,23 @@ func (api *UserHandler) UserRegister(c *gin.Context) {
|
||||
}
|
||||
|
||||
func (api *UserHandler) UserLogin(c *gin.Context) {
|
||||
var req contracts.LoginRequest
|
||||
var req loginRequest
|
||||
if err := c.ShouldBindJSON(&req); err != nil {
|
||||
c.JSON(http.StatusBadRequest, respond.WrongParamType)
|
||||
return
|
||||
}
|
||||
|
||||
captchaCtx, cancelCaptcha := context.WithTimeout(c.Request.Context(), 3*time.Second)
|
||||
defer cancelCaptcha()
|
||||
if err := api.captcha.Verify(captchaCtx, req.captchaPayload(), c.ClientIP()); err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
tokens, err := api.client.Login(ctx, req)
|
||||
tokens, err := api.client.Login(ctx, req.toContract())
|
||||
if err != nil {
|
||||
respond.DealWithError(c, err)
|
||||
return
|
||||
@@ -96,3 +135,20 @@ func (api *UserHandler) UserLogout(c *gin.Context) {
|
||||
}
|
||||
c.JSON(http.StatusOK, respond.Ok)
|
||||
}
|
||||
|
||||
// ensureRegisterEnabled centralizes the on/off switch for all
// registration-related entry points.
// Responsibilities:
//  1. Only decides whether the current environment allows registration, and
//     returns an explicit 403 to the frontend when it does not;
//  2. Does not cover other user/auth capabilities such as login, token
//     refresh, or logout;
//  3. Triggers no captcha or RPC calls, so a disabled registration entry
//     produces no useless downstream traffic.
func (api *UserHandler) ensureRegisterEnabled(c *gin.Context) bool {
	if api.allowRegister {
		return true
	}

	c.JSON(http.StatusForbidden, respond.Response{
		Status: "40301",
		Info:   "registration is disabled",
	})
	return false
}
|
||||
|
||||
@@ -2,8 +2,8 @@ package userauthapi
|
||||
|
||||
import (
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
@@ -13,13 +13,14 @@ import (
|
||||
// 1. 只注册 /user 下的边缘路由,不关心其它业务域路由;
|
||||
// 2. 登录、注册、刷新 token 只做请求转发;登出需要先经过 access token 边缘鉴权;
|
||||
// 3. 限流仍复用当前通用中间件,后续若 gateway 独立成包,可再整体下沉。
|
||||
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *UserHandler, authClient ports.AccessTokenValidator, limiter *pkg.RateLimiter) {
|
||||
func RegisterRoutes(apiGroup *gin.RouterGroup, handler *UserHandler, authClient ports.AccessTokenValidator, limiter *ratelimit.RateLimiter) {
|
||||
if apiGroup == nil || handler == nil {
|
||||
return
|
||||
}
|
||||
|
||||
userGroup := apiGroup.Group("/user")
|
||||
{
|
||||
userGroup.GET("/captcha/register", handler.CaptchaRegister)
|
||||
userGroup.POST("/register", handler.UserRegister)
|
||||
userGroup.POST("/login", handler.UserLogin)
|
||||
userGroup.POST("/refresh-token", handler.RefreshTokenHandler)
|
||||
|
||||
147
backend/gateway/middleware/cors.go
Normal file
147
backend/gateway/middleware/cors.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// CORSOptions configures CORSMiddleware. Zero values fall back to built-in
// defaults inside the middleware (methods, headers, and a 12h max age); an
// empty AllowedOrigins list disables CORS handling entirely.
type CORSOptions struct {
	AllowedOrigins   []string      // exact origins, or "*" for any origin
	AllowedMethods   []string      // preflight Access-Control-Allow-Methods
	AllowedHeaders   []string      // preflight Access-Control-Allow-Headers
	ExposedHeaders   []string      // Access-Control-Expose-Headers values
	AllowCredentials bool          // emit Allow-Credentials (never with "*")
	MaxAge           time.Duration // preflight cache lifetime
}
|
||||
|
||||
// CORSMiddleware builds a gin middleware implementing CORS for the origins
// configured in opts.
//
// Behavior:
//   - No configured origins: the middleware is a pass-through no-op.
//   - Disallowed origin: preflight (OPTIONS) gets 403; other requests pass
//     through without CORS headers.
//   - Allowed origin: CORS headers are set, and preflights are answered
//     directly with 204.
func CORSMiddleware(opts CORSOptions) gin.HandlerFunc {
	origins := normalizeHeaderValues(opts.AllowedOrigins)
	if len(origins) == 0 {
		// No allow-list configured: behave as if CORS handling is disabled.
		return func(c *gin.Context) {
			c.Next()
		}
	}

	// Precompute normalized header lists once, outside the per-request path.
	methods := normalizeHeaderValuesWithDefaults(opts.AllowedMethods, []string{
		http.MethodGet,
		http.MethodPost,
		http.MethodPut,
		http.MethodPatch,
		http.MethodDelete,
		http.MethodOptions,
	})
	headers := normalizeHeaderValuesWithDefaults(opts.AllowedHeaders, []string{
		"Authorization",
		"Content-Type",
		"Accept",
		"Origin",
		"X-Requested-With",
		"Idempotency-Key",
	})
	exposedHeaders := normalizeHeaderValues(opts.ExposedHeaders)
	maxAge := opts.MaxAge
	if maxAge <= 0 {
		maxAge = 12 * time.Hour
	}

	return func(c *gin.Context) {
		origin := strings.TrimSpace(c.GetHeader("Origin"))
		if origin == "" {
			// Same-origin / non-browser request: CORS does not apply.
			c.Next()
			return
		}

		allowedOrigin := matchAllowedOrigin(origin, origins)
		if allowedOrigin == "" {
			if c.Request.Method == http.MethodOptions {
				// Disallowed preflight is rejected explicitly.
				c.AbortWithStatus(http.StatusForbidden)
				return
			}
			c.Next()
			return
		}

		// Vary: Origin keeps shared caches from serving one origin's
		// response headers to another origin.
		setVaryHeader(c.Writer.Header(), "Origin")
		c.Header("Access-Control-Allow-Origin", allowedOrigin)
		if opts.AllowCredentials && allowedOrigin != "*" {
			// Credentials must never be combined with a wildcard origin.
			c.Header("Access-Control-Allow-Credentials", "true")
		}
		if len(exposedHeaders) > 0 {
			c.Header("Access-Control-Expose-Headers", strings.Join(exposedHeaders, ", "))
		}

		if c.Request.Method == http.MethodOptions {
			setVaryHeader(c.Writer.Header(), "Access-Control-Request-Method")
			setVaryHeader(c.Writer.Header(), "Access-Control-Request-Headers")
			c.Header("Access-Control-Allow-Methods", strings.Join(methods, ", "))
			c.Header("Access-Control-Allow-Headers", strings.Join(headers, ", "))
			c.Header("Access-Control-Max-Age", formatMaxAgeSeconds(maxAge))
			// Preflights are answered here; they never reach handlers.
			c.AbortWithStatus(http.StatusNoContent)
			return
		}

		c.Next()
	}
}
|
||||
|
||||
// matchAllowedOrigin returns the Access-Control-Allow-Origin value to emit
// for origin, or "" when the origin is not allowed. A configured "*" entry
// wins and is returned literally; exact entries match case-insensitively
// and echo the caller's origin back.
func matchAllowedOrigin(origin string, allowedOrigins []string) string {
	for _, candidate := range allowedOrigins {
		switch {
		case candidate == "*":
			return "*"
		case strings.EqualFold(origin, candidate):
			// Echo the request's own origin so the header matches exactly.
			return origin
		}
	}
	return ""
}
|
||||
|
||||
// normalizeHeaderValues trims every entry, drops empties, and removes
// case-insensitive duplicates, preserving first-seen order and casing.
func normalizeHeaderValues(values []string) []string {
	seen := make(map[string]struct{}, len(values))
	out := make([]string, 0, len(values))
	for _, raw := range values {
		entry := strings.TrimSpace(raw)
		if entry == "" {
			continue
		}
		// Dedupe key is lowercased; the original casing is what gets kept.
		key := strings.ToLower(entry)
		if _, dup := seen[key]; dup {
			continue
		}
		seen[key] = struct{}{}
		out = append(out, entry)
	}
	return out
}
|
||||
|
||||
func normalizeHeaderValuesWithDefaults(values []string, defaults []string) []string {
|
||||
normalized := normalizeHeaderValues(values)
|
||||
if len(normalized) > 0 {
|
||||
return normalized
|
||||
}
|
||||
return normalizeHeaderValues(defaults)
|
||||
}
|
||||
|
||||
func setVaryHeader(header http.Header, value string) {
|
||||
existing := header.Values("Vary")
|
||||
for _, entry := range existing {
|
||||
for _, part := range strings.Split(entry, ",") {
|
||||
if strings.EqualFold(strings.TrimSpace(part), value) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
header.Add("Vary", value)
|
||||
}
|
||||
|
||||
func formatMaxAgeSeconds(maxAge time.Duration) string {
|
||||
seconds := int(maxAge / time.Second)
|
||||
if seconds < 0 {
|
||||
seconds = 0
|
||||
}
|
||||
return strconv.Itoa(seconds)
|
||||
}
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -4,12 +4,12 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
func RateLimitMiddleware(limiter *pkg.RateLimiter, capacity, rate int) gin.HandlerFunc {
|
||||
func RateLimitMiddleware(limiter *ratelimit.RateLimiter, capacity, rate int) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
// 1. 确定限流对象:可以用 UserID,也可以用 IP
|
||||
// 这里建议用 UserID,防止某个用户换 IP 疯狂刷
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/shared/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// TokenQuotaGuard 在请求入口做 token 额度门禁。
|
||||
// 职责边界:
|
||||
// 1. 只负责调用 user/auth 服务判断当前用户是否还能继续消耗 token;
|
||||
// 2. 不再直连 users 表或 Redis 额度细节;
|
||||
// 3. 额度超限时直接拒绝,不进入业务 handler。
|
||||
func TokenQuotaGuard(checker ports.TokenQuotaChecker) gin.HandlerFunc {
|
||||
return func(c *gin.Context) {
|
||||
if checker == nil {
|
||||
c.JSON(http.StatusInternalServerError, respond.InternalError(errors.New("token quota checker dependency not initialized")))
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
userID := c.GetInt("user_id")
|
||||
if userID <= 0 {
|
||||
c.JSON(http.StatusUnauthorized, respond.ErrUnauthorized)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
resp, err := checker.CheckTokenQuota(ctx, userID)
|
||||
if err != nil {
|
||||
writeRespondError(c, err)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
if resp == nil || !resp.Allowed {
|
||||
c.JSON(http.StatusBadRequest, respond.TokenUsageExceedsLimit)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
}
|
||||
@@ -5,14 +5,19 @@ import (
|
||||
"errors"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
taskclassforumclient "github.com/LoveLosita/smartflow/backend/client/taskclassforum"
|
||||
tokenstoreclient "github.com/LoveLosita/smartflow/backend/client/tokenstore"
|
||||
"github.com/LoveLosita/smartflow/backend/gateway/api"
|
||||
forumapi "github.com/LoveLosita/smartflow/backend/gateway/api/forumapi"
|
||||
tokenstoreapi "github.com/LoveLosita/smartflow/backend/gateway/api/tokenstoreapi"
|
||||
userauthapi "github.com/LoveLosita/smartflow/backend/gateway/api/userauth"
|
||||
gatewaymiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/pkg"
|
||||
rootmiddleware "github.com/LoveLosita/smartflow/backend/gateway/middleware"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/dao"
|
||||
ratelimit "github.com/LoveLosita/smartflow/backend/shared/infra/ratelimit"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/spf13/viper"
|
||||
@@ -55,8 +60,22 @@ func StartEngine(ctx context.Context, r *gin.Engine) {
|
||||
}
|
||||
}
|
||||
|
||||
func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient, cache *dao.CacheDAO, limiter *pkg.RateLimiter) *gin.Engine {
|
||||
func RegisterRouters(
|
||||
handlers *api.ApiHandlers,
|
||||
authClient ports.UserAuthClient,
|
||||
forumClient *taskclassforumclient.Client,
|
||||
tokenStoreClient *tokenstoreclient.Client,
|
||||
cache *dao.CacheDAO,
|
||||
limiter *ratelimit.RateLimiter,
|
||||
) *gin.Engine {
|
||||
r := gin.Default()
|
||||
r.Use(gatewaymiddleware.CORSMiddleware(gatewaymiddleware.CORSOptions{
|
||||
AllowedOrigins: readConfigList("cors.allowedOrigins"),
|
||||
AllowedMethods: readConfigList("cors.allowedMethods"),
|
||||
AllowedHeaders: readConfigList("cors.allowedHeaders"),
|
||||
ExposedHeaders: readConfigList("cors.exposedHeaders"),
|
||||
AllowCredentials: viper.GetBool("cors.allowCredentials"),
|
||||
}))
|
||||
apiGroup := r.Group("/api/v1")
|
||||
{
|
||||
apiGroup.GET("/health", func(c *gin.Context) {
|
||||
@@ -66,7 +85,13 @@ func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient,
|
||||
})
|
||||
})
|
||||
|
||||
userauthapi.RegisterRoutes(apiGroup, userauthapi.NewUserHandler(authClient), authClient, limiter)
|
||||
userauthapi.RegisterRoutes(apiGroup, userauthapi.NewUserHandler(
|
||||
authClient,
|
||||
userauthapi.NewGeeTestServiceFromConfig(),
|
||||
readUserAuthAllowRegister(),
|
||||
), authClient, limiter)
|
||||
forumapi.RegisterRoutes(apiGroup, forumapi.NewHandler(forumClient), authClient, cache, limiter)
|
||||
tokenstoreapi.RegisterRoutes(apiGroup, tokenstoreapi.NewHandler(tokenStoreClient), authClient, cache, limiter)
|
||||
|
||||
taskGroup := apiGroup.Group("/task")
|
||||
{
|
||||
@@ -117,7 +142,7 @@ func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient,
|
||||
agentGroup := apiGroup.Group("/agent")
|
||||
{
|
||||
agentGroup.Use(gatewaymiddleware.JWTTokenAuth(authClient), rootmiddleware.RateLimitMiddleware(limiter, 20, 1))
|
||||
agentGroup.POST("/chat", gatewaymiddleware.TokenQuotaGuard(authClient), handlers.AgentHandler.ChatAgent)
|
||||
agentGroup.POST("/chat", handlers.AgentHandler.ChatAgent)
|
||||
agentGroup.GET("/conversation-meta", handlers.AgentHandler.GetConversationMeta)
|
||||
agentGroup.GET("/conversation-list", handlers.AgentHandler.GetConversationList)
|
||||
agentGroup.GET("/conversation-timeline", handlers.AgentHandler.GetConversationTimeline)
|
||||
@@ -160,3 +185,62 @@ func RegisterRouters(handlers *api.ApiHandlers, authClient ports.UserAuthClient,
|
||||
log.Println("Routes setup completed")
|
||||
return r
|
||||
}
|
||||
|
||||
func readConfigList(key string) []string {
|
||||
values := viper.GetStringSlice(key)
|
||||
if len(values) > 0 {
|
||||
return compactConfigList(expandConfigList(values))
|
||||
}
|
||||
|
||||
raw := strings.TrimSpace(viper.GetString(key))
|
||||
if raw == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
splitted := strings.FieldsFunc(raw, func(r rune) bool {
|
||||
return r == ',' || r == '\n' || r == '\r' || r == ';'
|
||||
})
|
||||
return compactConfigList(splitted)
|
||||
}
|
||||
|
||||
// expandConfigList splits every entry on the accepted list separators
// (comma, semicolon, CR, LF). Entries that yield no fragments are kept
// verbatim so downstream compaction decides their fate.
func expandConfigList(values []string) []string {
	isSeparator := func(r rune) bool {
		return r == ',' || r == '\n' || r == '\r' || r == ';'
	}

	expanded := make([]string, 0, len(values))
	for _, entry := range values {
		fields := strings.FieldsFunc(entry, isSeparator)
		if len(fields) == 0 {
			expanded = append(expanded, entry)
			continue
		}
		expanded = append(expanded, fields...)
	}
	return expanded
}
|
||||
|
||||
// compactConfigList trims entries, drops blanks, and removes
// case-insensitive duplicates while keeping first-seen order and casing.
func compactConfigList(values []string) []string {
	seen := make(map[string]struct{}, len(values))
	out := make([]string, 0, len(values))
	for _, entry := range values {
		item := strings.TrimSpace(entry)
		if item == "" {
			continue
		}
		// Dedupe on the lowercased form; keep the first casing we saw.
		lower := strings.ToLower(item)
		if _, dup := seen[lower]; dup {
			continue
		}
		seen[lower] = struct{}{}
		out = append(out, item)
	}
	return out
}
|
||||
|
||||
// readUserAuthAllowRegister resolves the userauth.allowRegister config flag.
func readUserAuthAllowRegister() bool {
	// 1. Default to "registration allowed" so legacy environments that never
	//    set this key do not silently change behavior after an upgrade.
	// 2. Registration is only disabled when userauth.allowRegister is set to
	//    false explicitly.
	if !viper.IsSet("userauth.allowRegister") {
		return true
	}
	return viper.GetBool("userauth.allowRegister")
}
|
||||
|
||||
81
backend/gateway/shared/respond/respond.go
Normal file
81
backend/gateway/shared/respond/respond.go
Normal file
@@ -0,0 +1,81 @@
|
||||
// Package respond 承载 gateway HTTP 门面使用的响应适配入口。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只面向 gateway/api 与 gateway/middleware,统一 HTTP JSON 写回与错误响应常量的 import 位置;
|
||||
// 2. 迁移期继续复用根 backend/respond 的响应码和错误语义,避免一次性改动服务层、RPC 层和 client 层;
|
||||
// 3. 不承载任何服务私有业务逻辑,服务代码禁止反向 import backend/gateway/shared/respond。
|
||||
package respond
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"net/http"
|
||||
|
||||
rootrespond "github.com/LoveLosita/smartflow/backend/shared/respond"
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
type (
	// Response is the project response-code structure the gateway passes
	// through to the frontend.
	Response = rootrespond.Response

	// FinalResponse is the unified HTTP response structure carrying a data
	// field.
	FinalResponse = rootrespond.FinalResponse
)
|
||||
|
||||
// Response values re-exported from the root respond package so gateway code
// has a single import location; semantics are unchanged by the migration.
var (
	Ok                          = rootrespond.Ok
	UserTasksEmpty              = rootrespond.UserTasksEmpty
	NoOngoingOrUpcomingSchedule = rootrespond.NoOngoingOrUpcomingSchedule
	TaskAlreadyDeleted          = rootrespond.TaskAlreadyDeleted
	WrongParamType              = rootrespond.WrongParamType
	MissingParam                = rootrespond.MissingParam
	MissingIdempotencyKey       = rootrespond.MissingIdempotencyKey
	MissingToken                = rootrespond.MissingToken
	InvalidClaims               = rootrespond.InvalidClaims
	ErrUnauthorized             = rootrespond.ErrUnauthorized
	RequestIsProcessing         = rootrespond.RequestIsProcessing
	ScheduleConflict            = rootrespond.ScheduleConflict
	TooManyRequests             = rootrespond.TooManyRequests
	TokenUsageExceedsLimit      = rootrespond.TokenUsageExceedsLimit
	ConversationNotFound        = rootrespond.ConversationNotFound
	MissingConversationID       = rootrespond.MissingConversationID
	CaptchaVerifyFailed         = rootrespond.CaptchaVerifyFailed
	CaptchaInitFailed           = rootrespond.CaptchaInitFailed
	CaptchaVerifyUnavailable    = rootrespond.CaptchaVerifyUnavailable
)
|
||||
|
||||
// RespWithData builds the unified response body with a data field for the
// gateway HTTP facade.
//
// Responsibilities:
//  1. Only assembles the response structure; it does not choose the HTTP
//     status code;
//  2. Response codes still come from the root respond package, so the
//     frontend protocol is unchanged by the migration.
func RespWithData(response Response, data interface{}) FinalResponse {
	return rootrespond.RespWithData(response, data)
}
|
||||
|
||||
// DealWithError maps a project error onto an HTTP JSON response.
//
// Responsibilities:
//  1. Only writes responses at the gateway HTTP layer;
//  2. Business error semantics remain owned by the root respond package;
//  3. A nil error is ignored, preserving the old DealWithError's lenient
//     behavior.
func DealWithError(c *gin.Context, err error) {
	if err == nil {
		return
	}
	var resp Response
	// "Empty result" style sentinels are business no-ops and are surfaced
	// with HTTP 200 so the frontend treats them as soft states.
	if errors.Is(err, UserTasksEmpty) || errors.Is(err, NoOngoingOrUpcomingSchedule) || errors.Is(err, TaskAlreadyDeleted) {
		c.JSON(http.StatusOK, err)
		return
	}
	// Known project response codes pick their own HTTP status.
	if errors.As(err, &resp) {
		c.JSON(resp.HTTPStatus(), resp)
		return
	}
	// Anything unrecognized degrades to a generic 500 envelope.
	c.JSON(http.StatusInternalServerError, InternalError(err))
}
|
||||
|
||||
// InternalError builds a 500-class response body, used for edge errors such
// as missing gateway dependencies.
func InternalError(err error) Response {
	return rootrespond.InternalError(err)
}
|
||||
@@ -1,10 +1,20 @@
|
||||
cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
|
||||
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
||||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
|
||||
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=
|
||||
github.com/IBM/sarama v1.43.1/go.mod h1:GG5q1RURtDNPz8xxJs3mgX6Ytak8Z9eLhAkJPObe2xE=
|
||||
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
|
||||
github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
|
||||
github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE=
|
||||
github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0 h1:RheObYW32G1aiJIj81XVt78ZHJpHonHLHW7OLIshq68=
|
||||
github.com/alicebob/miniredis/v2 v2.37.0/go.mod h1:TcL7YfarKPGDAthEtl5NBeHZfeUQj6OXMm/+iu5cLMM=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
|
||||
github.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=
|
||||
github.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=
|
||||
@@ -45,6 +55,7 @@ github.com/cloudwego/eino-ext/components/model/ark v0.1.64 h1:ecsP4xWhOGi6NYxl2N
|
||||
github.com/cloudwego/eino-ext/components/model/ark v0.1.64/go.mod h1:aabMR15RTXBSi9Eu13CWavzE+no5BQO4FJUEEdqImbg=
|
||||
github.com/cloudwego/eino-ext/libs/acl/openai v0.1.13 h1:z0bI5TH3nE+uDQiRhxBQMvk2HswlDUM3xP38+VSgpSQ=
|
||||
github.com/cloudwego/eino-ext/libs/acl/openai v0.1.13/go.mod h1:1xMQZ8eE11pkEoTAEy8UlaAY817qGVMvjpDPGSIO3Ns=
|
||||
github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=
|
||||
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
|
||||
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
|
||||
@@ -57,21 +68,30 @@ github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/r
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/eino-contrib/jsonschema v1.0.3 h1:2Kfsm1xlMV0ssY2nuxshS4AwbLFuqmPmzIjLVJ1Fsp0=
|
||||
github.com/eino-contrib/jsonschema v1.0.3/go.mod h1:cpnX4SyKjWjGC7iN2EbhxaTdLqGjCi0e9DxpLYxddD4=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
|
||||
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=
|
||||
github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=
|
||||
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=
|
||||
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
|
||||
github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
|
||||
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
|
||||
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
|
||||
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
|
||||
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
|
||||
github.com/fullstorydev/grpcurl v1.9.3/go.mod h1:/b4Wxe8bG6ndAjlfSUjwseQReUDUvBJiFEB7UllOlUE=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
|
||||
github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
|
||||
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
|
||||
@@ -83,6 +103,7 @@ github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
|
||||
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127 h1:0gkP6mzaMqkmpcJYCFOLkIBwI7xFExG03bbkOkCvUPI=
|
||||
github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
|
||||
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||
@@ -109,6 +130,7 @@ github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq
|
||||
github.com/go-sql-driver/mysql v1.9.3 h1:U/N249h2WzJ3Ukj8SowVFjdtZKfu9vlLZxjPXV1aweo=
|
||||
github.com/go-sql-driver/mysql v1.9.3/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
|
||||
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
|
||||
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
|
||||
@@ -124,6 +146,7 @@ github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
|
||||
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
@@ -137,6 +160,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
|
||||
github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo=
|
||||
github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
@@ -148,6 +173,7 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/jsonschema-go v0.4.2/go.mod h1:r5quNTdLOYEz95Ru18zA0ydNbBuYoo9tgaYcxEYhJVE=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
|
||||
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
@@ -157,16 +183,34 @@ github.com/goph/emperror v0.17.2 h1:yLapQcmEsO0ipe9p5TaN22djm3OFV/TfM/fcYP0/J18=
|
||||
github.com/goph/emperror v0.17.2/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
|
||||
github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g=
|
||||
github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k=
|
||||
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
|
||||
github.com/grafana/pyroscope-go v1.2.8 h1:UvCwIhlx9DeV7F6TW/z8q1Mi4PIm3vuUJ2ZlCEvmA4M=
|
||||
github.com/grafana/pyroscope-go v1.2.8/go.mod h1:SSi59eQ1/zmKoY/BKwa5rSFsJaq+242Bcrr4wPix1g8=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9 h1:c1Us8i6eSmkW+Ez05d3co8kasnuOY813tbMN8i/a3Og=
|
||||
github.com/grafana/pyroscope-go/godeltaprof v0.1.9/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU=
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7 h1:X+2YciYSxvMQK0UZ7sg45ZVabVZBeBuvMkmuI2V3Fak=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.7/go.mod h1:lW34nIZuQ8UDPdkon5fmfp2l3+ZkQ2me/+oecHYLOII=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
|
||||
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
|
||||
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
|
||||
github.com/jackc/pgx/v5 v5.8.0/go.mod h1:QVeDInX2m9VyzvNeiCJVjCkNFqzsNb43204HshNSZKw=
|
||||
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
|
||||
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
|
||||
github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=
|
||||
github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=
|
||||
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
|
||||
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
|
||||
github.com/jhump/protoreflect v1.18.0/go.mod h1:ezWcltJIVF4zYdIFM+D/sHV4Oh5LNU08ORzCGfwvTz8=
|
||||
github.com/jhump/protoreflect/v2 v2.0.0-beta.1/go.mod h1:D9LBEowZyv8/iSu97FU2zmXG3JxVTmNw21mu63niFzU=
|
||||
github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
|
||||
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
@@ -177,10 +221,12 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
@@ -210,10 +256,15 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/meguminnnnnnnnn/go-openai v0.1.1 h1:u/IMMgrj/d617Dh/8BKAwlcstD74ynOJzCtVl+y8xAs=
|
||||
github.com/meguminnnnnnnnn/go-openai v0.1.1/go.mod h1:qs96ysDmxhE4BZoU45I43zcyfnaYxU3X+aRzLko/htY=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
|
||||
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
|
||||
github.com/modelcontextprotocol/go-sdk v1.4.0/go.mod h1:Nxc2n+n/GdCebUaqCOhTetptS17SXXNu9IfNTaLDi1E=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -222,6 +273,8 @@ github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFd
|
||||
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/nikolalohinski/gonja v1.5.3 h1:GsA+EEaZDZPGJ8JtpeGN78jidhOlxeJROpqMT9fTj9c=
|
||||
github.com/nikolalohinski/gonja v1.5.3/go.mod h1:RmjwxNiXAEqcq1HeK5SSMmqFJvKOfTfXhkJv6YBtPa4=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
@@ -239,12 +292,15 @@ github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7s
|
||||
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
|
||||
github.com/pelletier/go-toml/v2 v2.3.0 h1:k59bC/lIZREW0/iVaQR8nDHxVq8OVlIzYCOJf421CaM=
|
||||
github.com/pelletier/go-toml/v2 v2.3.0/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/petermattis/goid v0.0.0-20260113132338-7c7de50cc741/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
|
||||
github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
|
||||
github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
|
||||
@@ -262,15 +318,21 @@ github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.54.0 h1:6s1YB9QotYI6Ospeiguknbp2Znb/jZYjZLRXn9kMQBg=
|
||||
github.com/quic-go/quic-go v0.54.0/go.mod h1:e68ZEaCdyviluZmy44P6Iey98v/Wfz6HCjQEm+l8zTY=
|
||||
github.com/rabbitmq/amqp091-go v1.9.0/go.mod h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/redis/go-redis/v9 v9.18.0 h1:pMkxYPkEbMPwRdenAzUNyFNrDgHx9U+DrBabWNfSRQs=
|
||||
github.com/redis/go-redis/v9 v9.18.0/go.mod h1:k3ufPphLU5YXwNTUcCRXGxUoF1fqxnhFQmscfkCoDA0=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/robertkrimen/otto v0.2.1 h1:FVP0PJ0AHIjC+N4pKCG9yCDz6LHNPCwi/GKID5pGGF0=
|
||||
github.com/robertkrimen/otto v0.2.1/go.mod h1:UPwtJ1Xu7JrLcZjNWN8orJaM5n5YEtqL//farB5FlRY=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
|
||||
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
|
||||
github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
|
||||
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
|
||||
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
|
||||
github.com/segmentio/asm v1.1.3/go.mod h1:Ld3L4ZXGNcSLRg4JBsZ3//1+f/TjYl0Mzen/DQy1EJg=
|
||||
github.com/segmentio/encoding v0.5.3/go.mod h1:HS1ZKa3kSN32ZHVZ7ZLPLXWvOVIiZtyJnO1gPH1sKt0=
|
||||
github.com/segmentio/kafka-go v0.4.47 h1:IqziR4pA3vrZq7YdRxaT3w1/5fvIH5qpCwstUanQQB0=
|
||||
github.com/segmentio/kafka-go v0.4.47/go.mod h1:HjF6XbOKh0Pjlkr5GVZxt6CsjjwnmhVOfURM5KMd8qg=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
@@ -294,6 +356,7 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
|
||||
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU=
|
||||
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
|
||||
github.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
@@ -336,8 +399,11 @@ github.com/xdg-go/scram v1.2.0 h1:bYKF2AEwG5rqd1BumT4gAnvwU/M9nBp2pTSxeZw7Wvs=
|
||||
github.com/xdg-go/scram v1.2.0/go.mod h1:3dlrS0iBaWKYVt2ZfA4cj48umJZ+cAEbR6/SjLA88I8=
|
||||
github.com/xdg-go/stringprep v1.0.4 h1:XLI/Ng3O1Atzq0oBs3TWm+5ZVgkq2aqdlvP9JtoZ6c8=
|
||||
github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM=
|
||||
github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU=
|
||||
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
||||
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2/go.mod h1:ILOh0sOhIJR3+L/8afwt/kE++YT040gmv5BQTMR2HP4=
|
||||
github.com/youmark/pkcs8 v0.0.0-20240726163527-a2c0da244d78/go.mod h1:aL8wCCfTfSfmXjznFBSZNN13rSJjlIOI1fUNAtF7rmI=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
@@ -353,8 +419,10 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoB
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
|
||||
go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
|
||||
go.mongodb.org/mongo-driver/v2 v2.5.0/go.mod h1:yOI9kBsufol30iFsl1slpdq1I0eHPzybRWdyYUs8K/0=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||
go.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=
|
||||
go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
|
||||
go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.40.0 h1:QKdN8ly8zEMrByybbQgv8cWBcdAarwmIPZ6FThrWXJs=
|
||||
@@ -461,6 +529,7 @@ golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/telemetry v0.0.0-20260109210033-bd525da824e2/go.mod h1:b7fPSJ0pKZ3ccUh8gnTONJxhn3c/PS6tyzQvyqw4iA8=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
|
||||
@@ -502,6 +571,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20230822172742-b8732ec3820d/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260427160629-7cedc36a6bc4 h1:yOzSCGPx+cp5VO7IxvZ9SBFF7j1tZVcNtlHR2iYKtVo=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20260427160629-7cedc36a6bc4/go.mod h1:Q9HWtNeE7tM9npdIsEvqXj1QJIvVoeAV3rtXtS715Cw=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260427160629-7cedc36a6bc4 h1:tEkOQcXgF6dH1G+MVKZrfpYvozGrzb91k6ha7jireSM=
|
||||
@@ -528,6 +598,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
|
||||
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
@@ -549,6 +620,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/mysql v1.6.0 h1:eNbLmNTpPpTOVZi8MMxCi2aaIm0ZpInbORNXDwyLGvg=
|
||||
gorm.io/driver/mysql v1.6.0/go.mod h1:D/oCC2GWK3M/dqoLxnOlaNKmXz8WNTfcS9y5ovaSqKo=
|
||||
gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8=
|
||||
gorm.io/gorm v1.31.1 h1:7CA8FTFz/gRfgqgpeKIBcervUn3xSyPUmr6B2WXJ7kg=
|
||||
gorm.io/gorm v1.31.1/go.mod h1:XyQVbO2k6YkOis7C2437jSit3SsDK72s7n7rsSHd+Gs=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
@@ -559,12 +631,14 @@ k8s.io/apimachinery v0.34.3 h1:/TB+SFEiQvN9HPldtlWOTp0hWbJ+fjU+wkxysf/aQnE=
|
||||
k8s.io/apimachinery v0.34.3/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw=
|
||||
k8s.io/client-go v0.34.3 h1:wtYtpzy/OPNYf7WyNBTj3iUA0XaBHVqhv4Iv3tbrF5A=
|
||||
k8s.io/client-go v0.34.3/go.mod h1:OxxeYagaP9Kdf78UrKLa3YZixMCfP6bgPwPwNBQBzpM=
|
||||
k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
|
||||
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
|
||||
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA=
|
||||
k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts=
|
||||
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5 h1:kBawHLSnx/mYHmRnNUf9d4CpjREbeZuxoSGOX/J+aYM=
|
||||
k8s.io/utils v0.0.0-20260319190234-28399d86e0b5/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
|
||||
sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
|
||||
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"github.com/LoveLosita/smartflow/backend/cmd"
|
||||
)
|
||||
|
||||
// main 保留仓库根入口的兼容壳,阶段 0 期间仍转发到 cmd.Start()。
|
||||
// main 保留仓库根入口的兼容壳,当前仍转发到 cmd.Start()(等价于 cmd.StartAPI())。
|
||||
// 终态会逐步迁移为各服务各自的独立 main.go。
|
||||
func main() {
|
||||
cmd.Start()
|
||||
|
||||
970
backend/scripts/dev-common.ps1
Normal file
970
backend/scripts/dev-common.ps1
Normal file
@@ -0,0 +1,970 @@
|
||||
$BackendRoot = Split-Path -Parent $PSScriptRoot
|
||||
$RepoRoot = Split-Path -Parent $BackendRoot
|
||||
$ComposeFile = Join-Path $RepoRoot "docker-compose.yml"
|
||||
$StateRoot = Join-Path $BackendRoot ".dev"
|
||||
$PidRoot = Join-Path $StateRoot "pids"
|
||||
$LogRoot = Join-Path $StateRoot "logs"
|
||||
$BinRoot = Join-Path $StateRoot "bin"
|
||||
$DockerConfigRoot = Join-Path $StateRoot "docker-config"
|
||||
$GoCacheRoot = Join-Path $StateRoot "gocache"
|
||||
|
||||
function Initialize-DevState {
|
||||
foreach ($path in @($StateRoot, $PidRoot, $LogRoot, $BinRoot, $DockerConfigRoot, $GoCacheRoot)) {
|
||||
if (-not (Test-Path -LiteralPath $path)) {
|
||||
New-Item -ItemType Directory -Path $path -Force | Out-Null
|
||||
}
|
||||
}
|
||||
|
||||
# 1. 当前桌面环境里可能同时注入 Path / PATH 两个大小写不同的环境变量。
|
||||
# 2. Windows PowerShell 的 Env: 提供器与 Start-Process 在这种情况下会报“重复键”错误。
|
||||
# 3. 这里统一只保留一个进程级 Path,避免启动脚本因为环境脏数据直接失败。
|
||||
$effectivePath = ""
|
||||
try {
|
||||
$pathValue = [System.Environment]::GetEnvironmentVariable("Path", "Process")
|
||||
if ($null -ne $pathValue) {
|
||||
$effectivePath = [string]$pathValue
|
||||
}
|
||||
}
|
||||
catch {
|
||||
$effectivePath = ""
|
||||
}
|
||||
[System.Environment]::SetEnvironmentVariable("PATH", $null, "Process")
|
||||
[System.Environment]::SetEnvironmentVariable("Path", $null, "Process")
|
||||
[System.Environment]::SetEnvironmentVariable("Path", $effectivePath, "Process")
|
||||
|
||||
$env:DOCKER_CONFIG = $DockerConfigRoot
|
||||
$env:GOCACHE = $GoCacheRoot
|
||||
}
|
||||
|
||||
function Assert-ToolExists {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$Name
|
||||
)
|
||||
|
||||
if (-not (Get-Command $Name -ErrorAction SilentlyContinue)) {
|
||||
throw "Command not found: $Name"
|
||||
}
|
||||
}
|
||||
|
||||
function Invoke-ExternalCommand {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$FilePath,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string[]]$Arguments,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$WorkingDirectory
|
||||
)
|
||||
|
||||
Push-Location $WorkingDirectory
|
||||
try {
|
||||
& $FilePath @Arguments
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
throw "Command failed: $FilePath $($Arguments -join ' ')"
|
||||
}
|
||||
}
|
||||
finally {
|
||||
Pop-Location
|
||||
}
|
||||
}
|
||||
|
||||
function Get-InfrastructureDefinitions {
|
||||
return @(
|
||||
[pscustomobject]@{
|
||||
Name = "mysql"
|
||||
Container = "smartflow-mysql"
|
||||
TimeoutSec = 120
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "redis"
|
||||
Container = "smartflow-redis"
|
||||
TimeoutSec = 90
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "kafka"
|
||||
Container = "smartflow-kafka"
|
||||
TimeoutSec = 180
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "etcd"
|
||||
Container = "smartflow-etcd"
|
||||
TimeoutSec = 120
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "minio"
|
||||
Container = "smartflow-minio"
|
||||
TimeoutSec = 120
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "milvus-standalone"
|
||||
Container = "smartflow-milvus"
|
||||
TimeoutSec = 240
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
function Get-InfrastructureComposeServices {
|
||||
return @(
|
||||
"mysql",
|
||||
"redis",
|
||||
"kafka",
|
||||
"etcd",
|
||||
"minio",
|
||||
"milvus-standalone",
|
||||
"kafka-init"
|
||||
)
|
||||
}
|
||||
|
||||
function Get-KafkaTopicDefinitions {
|
||||
return @(
|
||||
"smartflow.agent.outbox",
|
||||
"smartflow.task.outbox",
|
||||
"smartflow.memory.outbox",
|
||||
"smartflow.active-scheduler.outbox",
|
||||
"smartflow.notification.outbox",
|
||||
"smartflow.taskclass-forum.outbox",
|
||||
"smartflow.llm.outbox",
|
||||
"smartflow.token-store.outbox"
|
||||
)
|
||||
}
|
||||
|
||||
function Get-BackendServiceDefinitions {
|
||||
return @(
|
||||
[pscustomobject]@{
|
||||
Name = "userauth"
|
||||
Package = "./cmd/userauth"
|
||||
BinaryPath = (Join-Path $BinRoot "userauth.exe")
|
||||
Port = 9081
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 60
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "task"
|
||||
Package = "./cmd/task"
|
||||
BinaryPath = (Join-Path $BinRoot "task.exe")
|
||||
Port = 9085
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "schedule"
|
||||
Package = "./cmd/schedule"
|
||||
BinaryPath = (Join-Path $BinRoot "schedule.exe")
|
||||
Port = 9084
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "task-class"
|
||||
Package = "./cmd/task-class"
|
||||
BinaryPath = (Join-Path $BinRoot "task-class.exe")
|
||||
Port = 9086
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "llm"
|
||||
Package = "./cmd/llm"
|
||||
BinaryPath = (Join-Path $BinRoot "llm.exe")
|
||||
Port = 9096
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 120
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "course"
|
||||
Package = "./cmd/course"
|
||||
BinaryPath = (Join-Path $BinRoot "course.exe")
|
||||
Port = 9087
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 120
|
||||
Dependencies = @("llm")
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "tokenstore"
|
||||
Package = "./cmd/tokenstore"
|
||||
BinaryPath = (Join-Path $BinRoot "tokenstore.exe")
|
||||
Port = 9095
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "notification"
|
||||
Package = "./cmd/notification"
|
||||
BinaryPath = (Join-Path $BinRoot "notification.exe")
|
||||
Port = 9082
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @()
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "memory"
|
||||
Package = "./cmd/memory"
|
||||
BinaryPath = (Join-Path $BinRoot "memory.exe")
|
||||
Port = 9088
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 150
|
||||
Dependencies = @("llm")
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "taskclassforum"
|
||||
Aliases = @("forum")
|
||||
Package = "./cmd/taskclassforum"
|
||||
BinaryPath = (Join-Path $BinRoot "taskclassforum.exe")
|
||||
Port = 9090
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @("task-class")
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "active-scheduler"
|
||||
Package = "./cmd/active-scheduler"
|
||||
BinaryPath = (Join-Path $BinRoot "active-scheduler.exe")
|
||||
Port = 9083
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 120
|
||||
Dependencies = @("task", "schedule", "llm")
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "agent"
|
||||
Package = "./cmd/agent"
|
||||
BinaryPath = (Join-Path $BinRoot "agent.exe")
|
||||
Port = 9089
|
||||
ProbeType = "tcp"
|
||||
ProbeTarget = $null
|
||||
StartTimeoutSec = 180
|
||||
Dependencies = @("task", "schedule", "task-class", "memory", "llm")
|
||||
},
|
||||
[pscustomobject]@{
|
||||
Name = "api"
|
||||
Package = "./cmd/api"
|
||||
BinaryPath = (Join-Path $BinRoot "api.exe")
|
||||
Port = 8080
|
||||
ProbeType = "http"
|
||||
ProbeTarget = "http://127.0.0.1:8080/api/v1/health"
|
||||
StartTimeoutSec = 90
|
||||
Dependencies = @(
|
||||
"userauth",
|
||||
"task",
|
||||
"schedule",
|
||||
"task-class",
|
||||
"course",
|
||||
"llm",
|
||||
"tokenstore",
|
||||
"notification",
|
||||
"memory",
|
||||
"taskclassforum",
|
||||
"active-scheduler",
|
||||
"agent"
|
||||
)
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
function Get-BackendServiceDefinition {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$Name
|
||||
)
|
||||
|
||||
$serviceDefinitions = @(Get-BackendServiceDefinitions)
|
||||
foreach ($service in $serviceDefinitions) {
|
||||
$aliases = @()
|
||||
if ($service.PSObject.Properties.Name -contains "Aliases" -and $null -ne $service.Aliases) {
|
||||
$aliases = @($service.Aliases)
|
||||
}
|
||||
|
||||
if ($service.Name -eq $Name -or $aliases -contains $Name) {
|
||||
return $service
|
||||
}
|
||||
}
|
||||
|
||||
$availableNames = foreach ($service in $serviceDefinitions) {
|
||||
$aliases = @()
|
||||
if ($service.PSObject.Properties.Name -contains "Aliases" -and $null -ne $service.Aliases) {
|
||||
$aliases = @($service.Aliases)
|
||||
}
|
||||
|
||||
if ($aliases.Count -gt 0) {
|
||||
"{0} ({1})" -f $service.Name, ($aliases -join ", ")
|
||||
}
|
||||
else {
|
||||
$service.Name
|
||||
}
|
||||
}
|
||||
|
||||
throw "Service definition not found: $Name. Available names: $($availableNames -join '; ')"
|
||||
}
|
||||
|
||||
function Get-ServicePidFilePath {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
return (Join-Path $PidRoot "$($Service.Name).json")
|
||||
}
|
||||
|
||||
function Get-ServiceStdoutLogPath {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
return (Join-Path $LogRoot "$($Service.Name).log")
|
||||
}
|
||||
|
||||
function Get-ServiceStderrLogPath {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
return (Join-Path $LogRoot "$($Service.Name).err.log")
|
||||
}
|
||||
|
||||
function New-ServiceLogPaths {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$stamp = Get-Date -Format "yyyyMMdd-HHmmss"
|
||||
return [pscustomobject]@{
|
||||
Stdout = (Join-Path $LogRoot "$($Service.Name)-$stamp.log")
|
||||
Stderr = (Join-Path $LogRoot "$($Service.Name)-$stamp.err.log")
|
||||
}
|
||||
}
|
||||
|
||||
function Read-ServicePidInfo {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$path = Get-ServicePidFilePath -Service $Service
|
||||
if (-not (Test-Path -LiteralPath $path)) {
|
||||
return $null
|
||||
}
|
||||
|
||||
return (Get-Content -LiteralPath $path -Encoding UTF8 | ConvertFrom-Json)
|
||||
}
|
||||
|
||||
function Get-LatestServiceLogPath {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[ValidateSet("stdout", "stderr")]
|
||||
[string]$Stream
|
||||
)
|
||||
|
||||
$pidInfo = Read-ServicePidInfo -Service $Service
|
||||
if ($null -ne $pidInfo) {
|
||||
$managedPath = if ($Stream -eq "stdout") { [string]$pidInfo.StdoutLogPath } else { [string]$pidInfo.StderrLogPath }
|
||||
if (-not [string]::IsNullOrWhiteSpace($managedPath) -and (Test-Path -LiteralPath $managedPath)) {
|
||||
return $managedPath
|
||||
}
|
||||
}
|
||||
|
||||
if ($Stream -eq "stdout") {
|
||||
$candidates = @(Get-ChildItem -LiteralPath $LogRoot -Filter "$($Service.Name)-*.log" -ErrorAction SilentlyContinue |
|
||||
Where-Object { $_.Name -notlike "*.err.log" } |
|
||||
Sort-Object LastWriteTime -Descending)
|
||||
}
|
||||
else {
|
||||
$candidates = @(Get-ChildItem -LiteralPath $LogRoot -Filter "$($Service.Name)-*.err.log" -ErrorAction SilentlyContinue |
|
||||
Sort-Object LastWriteTime -Descending)
|
||||
}
|
||||
|
||||
if ($null -eq $candidates -or $candidates.Count -eq 0) {
|
||||
return $null
|
||||
}
|
||||
|
||||
return $candidates[0].FullName
|
||||
}
|
||||
|
||||
function Write-ServicePidInfo {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[System.Diagnostics.Process]$Process,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$StdoutLogPath,
|
||||
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$StderrLogPath
|
||||
)
|
||||
|
||||
$payload = [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Pid = [int]$Process.Id
|
||||
Port = [int]$Service.Port
|
||||
StartedAt = (Get-Date).ToString("o")
|
||||
BinaryPath = $Service.BinaryPath
|
||||
StdoutLogPath = $StdoutLogPath
|
||||
StderrLogPath = $StderrLogPath
|
||||
}
|
||||
|
||||
$path = Get-ServicePidFilePath -Service $Service
|
||||
$payload | ConvertTo-Json | Set-Content -LiteralPath $path -Encoding UTF8
|
||||
}
|
||||
|
||||
function Remove-ServicePidInfo {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$path = Get-ServicePidFilePath -Service $Service
|
||||
if (Test-Path -LiteralPath $path) {
|
||||
Remove-Item -LiteralPath $path -Force
|
||||
}
|
||||
}
|
||||
|
||||
function Test-ProcessAlive {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[int]$ProcessId
|
||||
)
|
||||
|
||||
return ($null -ne (Get-Process -Id $ProcessId -ErrorAction SilentlyContinue))
|
||||
}
|
||||
|
||||
function Get-ListeningProcessId {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[int]$Port
|
||||
)
|
||||
|
||||
try {
|
||||
$connection = Get-NetTCPConnection -LocalPort $Port -State Listen -ErrorAction Stop |
|
||||
Sort-Object -Property OwningProcess |
|
||||
Select-Object -First 1
|
||||
if ($null -ne $connection) {
|
||||
return [int]$connection.OwningProcess
|
||||
}
|
||||
}
|
||||
catch {
|
||||
}
|
||||
|
||||
$pattern = "^\s*TCP\s+\S+:$Port\s+\S+\s+LISTENING\s+(\d+)\s*$"
|
||||
foreach ($line in (netstat -ano -p tcp)) {
|
||||
if ($line -match $pattern) {
|
||||
return [int]$matches[1]
|
||||
}
|
||||
}
|
||||
|
||||
return $null
|
||||
}
|
||||
|
||||
function Test-TcpPort {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[int]$Port,
|
||||
|
||||
[string]$HostName = "127.0.0.1",
|
||||
|
||||
[int]$TimeoutMs = 500
|
||||
)
|
||||
|
||||
$client = New-Object System.Net.Sockets.TcpClient
|
||||
try {
|
||||
$asyncResult = $client.BeginConnect($HostName, $Port, $null, $null)
|
||||
if (-not $asyncResult.AsyncWaitHandle.WaitOne($TimeoutMs, $false)) {
|
||||
return $false
|
||||
}
|
||||
|
||||
$client.EndConnect($asyncResult)
|
||||
return $true
|
||||
}
|
||||
catch {
|
||||
return $false
|
||||
}
|
||||
finally {
|
||||
$client.Close()
|
||||
}
|
||||
}
|
||||
|
||||
function Test-HttpEndpoint {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$Url,
|
||||
|
||||
[int]$TimeoutSec = 2
|
||||
)
|
||||
|
||||
try {
|
||||
$response = Invoke-WebRequest -Uri $Url -Method Get -TimeoutSec $TimeoutSec -UseBasicParsing
|
||||
return ($response.StatusCode -ge 200 -and $response.StatusCode -lt 300)
|
||||
}
|
||||
catch {
|
||||
return $false
|
||||
}
|
||||
}
|
||||
|
||||
function Test-ServiceHealthy {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
switch ($Service.ProbeType) {
|
||||
"http" {
|
||||
return (Test-HttpEndpoint -Url $Service.ProbeTarget)
|
||||
}
|
||||
default {
|
||||
return (Test-TcpPort -Port $Service.Port)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function Wait-ServiceReady {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service,
|
||||
|
||||
[int]$TimeoutSec,
|
||||
|
||||
[int]$ProcessId
|
||||
)
|
||||
|
||||
$effectiveTimeout = $TimeoutSec
|
||||
if ($effectiveTimeout -le 0) {
|
||||
$effectiveTimeout = [int]$Service.StartTimeoutSec
|
||||
}
|
||||
|
||||
$deadline = (Get-Date).AddSeconds($effectiveTimeout)
|
||||
while ((Get-Date) -lt $deadline) {
|
||||
if ($ProcessId -gt 0 -and -not (Test-ProcessAlive -ProcessId $ProcessId)) {
|
||||
return $false
|
||||
}
|
||||
|
||||
if (Test-ServiceHealthy -Service $Service) {
|
||||
return $true
|
||||
}
|
||||
|
||||
Start-Sleep -Seconds 1
|
||||
}
|
||||
|
||||
return $false
|
||||
}
|
||||
|
||||
function Get-ServiceStatus {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$pidInfo = Read-ServicePidInfo -Service $Service
|
||||
$managedPid = $null
|
||||
$hasStalePid = $false
|
||||
$stdoutLogPath = Get-ServiceStdoutLogPath -Service $Service
|
||||
$stderrLogPath = Get-ServiceStderrLogPath -Service $Service
|
||||
if ($null -ne $pidInfo) {
|
||||
$managedPid = [int]$pidInfo.Pid
|
||||
if (-not [string]::IsNullOrWhiteSpace($pidInfo.StdoutLogPath)) {
|
||||
$stdoutLogPath = [string]$pidInfo.StdoutLogPath
|
||||
}
|
||||
if (-not [string]::IsNullOrWhiteSpace($pidInfo.StderrLogPath)) {
|
||||
$stderrLogPath = [string]$pidInfo.StderrLogPath
|
||||
}
|
||||
if (-not (Test-ProcessAlive -ProcessId $managedPid)) {
|
||||
$hasStalePid = $true
|
||||
$managedPid = $null
|
||||
}
|
||||
}
|
||||
|
||||
$healthy = Test-ServiceHealthy -Service $Service
|
||||
$listenerPid = Get-ListeningProcessId -Port $Service.Port
|
||||
|
||||
if ($null -ne $managedPid) {
|
||||
if ($healthy) {
|
||||
return [pscustomobject]@{
|
||||
State = "managed"
|
||||
Summary = "managed-running"
|
||||
Pid = $managedPid
|
||||
Port = $Service.Port
|
||||
HasStalePid = $false
|
||||
StdoutLog = $stdoutLogPath
|
||||
StderrLog = $stderrLogPath
|
||||
}
|
||||
}
|
||||
|
||||
return [pscustomobject]@{
|
||||
State = "managed_unhealthy"
|
||||
Summary = "managed-not-ready"
|
||||
Pid = $managedPid
|
||||
Port = $Service.Port
|
||||
HasStalePid = $false
|
||||
StdoutLog = $stdoutLogPath
|
||||
StderrLog = $stderrLogPath
|
||||
}
|
||||
}
|
||||
|
||||
if ($healthy) {
|
||||
return [pscustomobject]@{
|
||||
State = "external"
|
||||
Summary = "external-running"
|
||||
Pid = $listenerPid
|
||||
Port = $Service.Port
|
||||
HasStalePid = $hasStalePid
|
||||
StdoutLog = $null
|
||||
StderrLog = $null
|
||||
}
|
||||
}
|
||||
|
||||
if ($null -ne $listenerPid) {
|
||||
return [pscustomobject]@{
|
||||
State = "conflict"
|
||||
Summary = "port-conflict"
|
||||
Pid = $listenerPid
|
||||
Port = $Service.Port
|
||||
HasStalePid = $hasStalePid
|
||||
StdoutLog = $null
|
||||
StderrLog = $null
|
||||
}
|
||||
}
|
||||
|
||||
if ($hasStalePid) {
|
||||
return [pscustomobject]@{
|
||||
State = "stale"
|
||||
Summary = "stale-pid-file"
|
||||
Pid = $null
|
||||
Port = $Service.Port
|
||||
HasStalePid = $true
|
||||
StdoutLog = $stdoutLogPath
|
||||
StderrLog = $stderrLogPath
|
||||
}
|
||||
}
|
||||
|
||||
return [pscustomobject]@{
|
||||
State = "stopped"
|
||||
Summary = "stopped"
|
||||
Pid = $null
|
||||
Port = $Service.Port
|
||||
HasStalePid = $false
|
||||
StdoutLog = $stdoutLogPath
|
||||
StderrLog = $stderrLogPath
|
||||
}
|
||||
}
|
||||
|
||||
function Build-ServiceBinary {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
Initialize-DevState
|
||||
Invoke-ExternalCommand -FilePath "go" -Arguments @(
|
||||
"build",
|
||||
"-o",
|
||||
$Service.BinaryPath,
|
||||
$Service.Package
|
||||
) -WorkingDirectory $BackendRoot
|
||||
}
|
||||
|
||||
function Start-ServiceProcess {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$logPaths = New-ServiceLogPaths -Service $Service
|
||||
$stdoutLog = $logPaths.Stdout
|
||||
$stderrLog = $logPaths.Stderr
|
||||
|
||||
$process = Start-Process -FilePath $Service.BinaryPath `
|
||||
-WorkingDirectory $BackendRoot `
|
||||
-WindowStyle Hidden `
|
||||
-RedirectStandardOutput $stdoutLog `
|
||||
-RedirectStandardError $stderrLog `
|
||||
-PassThru
|
||||
|
||||
Write-ServicePidInfo -Service $Service -Process $process -StdoutLogPath $stdoutLog -StderrLogPath $stderrLog
|
||||
if (-not (Wait-ServiceReady -Service $Service -TimeoutSec $Service.StartTimeoutSec -ProcessId $process.Id)) {
|
||||
if (Test-ProcessAlive -ProcessId $process.Id) {
|
||||
Stop-Process -Id $process.Id -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
Remove-ServicePidInfo -Service $Service
|
||||
throw "Service start failed: $($Service.Name). Check logs: $stdoutLog and $stderrLog"
|
||||
}
|
||||
|
||||
return $process
|
||||
}
|
||||
|
||||
function Stop-ServiceProcess {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$pidInfo = Read-ServicePidInfo -Service $Service
|
||||
if ($null -eq $pidInfo) {
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Action = "skip"
|
||||
Message = "no managed process"
|
||||
}
|
||||
}
|
||||
|
||||
$managedProcessId = [int]$pidInfo.Pid
|
||||
if (Test-ProcessAlive -ProcessId $managedProcessId) {
|
||||
try {
|
||||
Stop-Process -Id $managedProcessId -ErrorAction Stop
|
||||
}
|
||||
catch {
|
||||
Stop-Process -Id $managedProcessId -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
|
||||
$deadline = (Get-Date).AddSeconds(15)
|
||||
while ((Get-Date) -lt $deadline -and (Test-ProcessAlive -ProcessId $managedProcessId)) {
|
||||
Start-Sleep -Milliseconds 500
|
||||
}
|
||||
|
||||
if (Test-ProcessAlive -ProcessId $managedProcessId) {
|
||||
Stop-Process -Id $managedProcessId -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
}
|
||||
|
||||
Remove-ServicePidInfo -Service $Service
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Action = "stopped"
|
||||
Message = "managed process stopped"
|
||||
}
|
||||
}
|
||||
|
||||
function Ensure-ServiceStarted {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$status = Get-ServiceStatus -Service $Service
|
||||
if ($status.HasStalePid) {
|
||||
Remove-ServicePidInfo -Service $Service
|
||||
$status = Get-ServiceStatus -Service $Service
|
||||
}
|
||||
|
||||
switch ($status.State) {
|
||||
"managed" {
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Port = $Service.Port
|
||||
Action = "skip"
|
||||
Detail = "managed and healthy"
|
||||
}
|
||||
}
|
||||
"external" {
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Port = $Service.Port
|
||||
Action = "skip"
|
||||
Detail = "external process already healthy"
|
||||
}
|
||||
}
|
||||
"managed_unhealthy" {
|
||||
if (Wait-ServiceReady -Service $Service -TimeoutSec 15 -ProcessId $status.Pid) {
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Port = $Service.Port
|
||||
Action = "skip"
|
||||
Detail = "existing managed process became healthy"
|
||||
}
|
||||
}
|
||||
|
||||
throw "Service $($Service.Name) already has a managed process, but it is still not healthy after waiting. Check log: $($status.StdoutLog)"
|
||||
}
|
||||
"conflict" {
|
||||
throw "Service $($Service.Name) port $($Service.Port) is occupied by process $($status.Pid), but the health check failed. Resolve the conflict first."
|
||||
}
|
||||
}
|
||||
|
||||
Assert-ServiceDependenciesReady -Service $Service
|
||||
Build-ServiceBinary -Service $Service
|
||||
$process = Start-ServiceProcess -Service $Service
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Port = $Service.Port
|
||||
Action = "start"
|
||||
Detail = "started, PID=$($process.Id)"
|
||||
}
|
||||
}
|
||||
|
||||
function Restart-BackendService {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
$status = Get-ServiceStatus -Service $Service
|
||||
if ($status.HasStalePid) {
|
||||
Remove-ServicePidInfo -Service $Service
|
||||
$status = Get-ServiceStatus -Service $Service
|
||||
}
|
||||
|
||||
switch ($status.State) {
|
||||
"external" {
|
||||
throw "Service $($Service.Name) is managed by an external process. Refuse to restart it automatically."
|
||||
}
|
||||
"conflict" {
|
||||
throw "Service $($Service.Name) port $($Service.Port) is occupied by process $($status.Pid), but the health check failed. Resolve the conflict first."
|
||||
}
|
||||
"managed" {
|
||||
Stop-ServiceProcess -Service $Service | Out-Null
|
||||
}
|
||||
"managed_unhealthy" {
|
||||
Stop-ServiceProcess -Service $Service | Out-Null
|
||||
}
|
||||
}
|
||||
|
||||
Assert-ServiceDependenciesReady -Service $Service
|
||||
Build-ServiceBinary -Service $Service
|
||||
$process = Start-ServiceProcess -Service $Service
|
||||
return [pscustomobject]@{
|
||||
Name = $Service.Name
|
||||
Port = $Service.Port
|
||||
Action = "restart"
|
||||
Detail = "restarted, PID=$($process.Id)"
|
||||
}
|
||||
}
|
||||
|
||||
function Get-ContainerStatus {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$ContainerName
|
||||
)
|
||||
|
||||
$status = cmd.exe /d /c "docker inspect --format ""{{if .State.Health}}{{.State.Health.Status}}{{else}}{{.State.Status}}{{end}}"" $ContainerName 2>nul"
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
return "unavailable"
|
||||
}
|
||||
|
||||
return (($status | Select-Object -First 1).Trim())
|
||||
}
|
||||
|
||||
function Wait-ContainerStatus {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$ContainerDefinition
|
||||
)
|
||||
|
||||
$deadline = (Get-Date).AddSeconds([int]$ContainerDefinition.TimeoutSec)
|
||||
while ((Get-Date) -lt $deadline) {
|
||||
$status = Get-ContainerStatus -ContainerName $ContainerDefinition.Container
|
||||
if ($status -eq "healthy") {
|
||||
return
|
||||
}
|
||||
|
||||
Start-Sleep -Seconds 2
|
||||
}
|
||||
|
||||
throw "Container did not become healthy in time: $($ContainerDefinition.Name)"
|
||||
}
|
||||
|
||||
function Start-BackendInfrastructure {
|
||||
Assert-ToolExists -Name "docker"
|
||||
if (-not (Test-Path -LiteralPath $ComposeFile)) {
|
||||
throw "docker-compose.yml not found: $ComposeFile"
|
||||
}
|
||||
|
||||
$composeServices = Get-InfrastructureComposeServices
|
||||
Invoke-ExternalCommand -FilePath "docker" -Arguments (@(
|
||||
"compose",
|
||||
"-f",
|
||||
$ComposeFile,
|
||||
"up",
|
||||
"-d"
|
||||
) + $composeServices) -WorkingDirectory $RepoRoot
|
||||
|
||||
foreach ($definition in (Get-InfrastructureDefinitions)) {
|
||||
Wait-ContainerStatus -ContainerDefinition $definition
|
||||
}
|
||||
|
||||
Ensure-KafkaTopics
|
||||
}
|
||||
|
||||
function Ensure-KafkaTopics {
|
||||
Assert-ToolExists -Name "docker"
|
||||
|
||||
foreach ($topic in (Get-KafkaTopicDefinitions)) {
|
||||
$arguments = @(
|
||||
"exec",
|
||||
"smartflow-kafka",
|
||||
"/opt/kafka/bin/kafka-topics.sh",
|
||||
"--bootstrap-server",
|
||||
"localhost:9092",
|
||||
"--create",
|
||||
"--if-not-exists",
|
||||
"--topic",
|
||||
$topic,
|
||||
"--partitions",
|
||||
"3",
|
||||
"--replication-factor",
|
||||
"1"
|
||||
)
|
||||
Invoke-ExternalCommand -FilePath "docker" -Arguments $arguments -WorkingDirectory $RepoRoot
|
||||
}
|
||||
}
|
||||
|
||||
function Stop-BackendInfrastructure {
|
||||
Assert-ToolExists -Name "docker"
|
||||
$composeServices = Get-InfrastructureComposeServices
|
||||
Invoke-ExternalCommand -FilePath "docker" -Arguments (@(
|
||||
"compose",
|
||||
"-f",
|
||||
$ComposeFile,
|
||||
"stop"
|
||||
) + $composeServices) -WorkingDirectory $RepoRoot
|
||||
}
|
||||
|
||||
function Get-InfrastructureStatus {
|
||||
$rows = @()
|
||||
foreach ($definition in (Get-InfrastructureDefinitions)) {
|
||||
$rows += [pscustomobject]@{
|
||||
Name = $definition.Name
|
||||
Container = $definition.Container
|
||||
Status = (Get-ContainerStatus -ContainerName $definition.Container)
|
||||
}
|
||||
}
|
||||
return $rows
|
||||
}
|
||||
|
||||
function Assert-ServiceDependenciesReady {
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[pscustomobject]$Service
|
||||
)
|
||||
|
||||
foreach ($dependencyName in $Service.Dependencies) {
|
||||
$dependency = Get-BackendServiceDefinition -Name $dependencyName
|
||||
$status = Get-ServiceStatus -Service $dependency
|
||||
if ($status.State -notin @("managed", "external")) {
|
||||
throw "Dependency not ready for service $($Service.Name): $dependencyName (current state: $($status.Summary))"
|
||||
}
|
||||
}
|
||||
}
|
||||
29
backend/scripts/dev-down.ps1
Normal file
29
backend/scripts/dev-down.ps1
Normal file
@@ -0,0 +1,29 @@
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[switch]$StopInfra
|
||||
)
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
. "$PSScriptRoot\dev-common.ps1"
|
||||
|
||||
Initialize-DevState
|
||||
|
||||
$services = @(Get-BackendServiceDefinitions)
|
||||
[array]::Reverse($services)
|
||||
|
||||
$results = @()
|
||||
foreach ($service in $services) {
|
||||
Write-Host "==> Stop service: $($service.Name)"
|
||||
$results += Stop-ServiceProcess -Service $service
|
||||
}
|
||||
|
||||
if ($StopInfra) {
|
||||
Write-Host "==> Stop infrastructure containers"
|
||||
Stop-BackendInfrastructure
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "Backend stop summary:"
|
||||
$results | Format-Table -AutoSize
|
||||
67
backend/scripts/dev-logs.ps1
Normal file
67
backend/scripts/dev-logs.ps1
Normal file
@@ -0,0 +1,67 @@
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$Service,
|
||||
|
||||
[ValidateSet("stdout", "stderr", "both")]
|
||||
[string]$Stream = "stdout",
|
||||
|
||||
[int]$Tail = 80,
|
||||
|
||||
[switch]$Follow
|
||||
)
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
. "$PSScriptRoot\dev-common.ps1"
|
||||
|
||||
Initialize-DevState
|
||||
|
||||
if ($Tail -le 0) {
|
||||
throw "Tail must be greater than 0"
|
||||
}
|
||||
|
||||
$serviceDef = Get-BackendServiceDefinition -Name $Service
|
||||
$streams = @(
|
||||
if ($Stream -eq "both") {
|
||||
"stdout"
|
||||
"stderr"
|
||||
}
|
||||
else {
|
||||
$Stream
|
||||
}
|
||||
)
|
||||
|
||||
if ($Follow -and $streams.Count -gt 1) {
|
||||
throw "Follow mode only supports a single stream. Use -Stream stdout or -Stream stderr."
|
||||
}
|
||||
|
||||
$paths = @()
|
||||
foreach ($selectedStream in $streams) {
|
||||
$path = Get-LatestServiceLogPath -Service $serviceDef -Stream $selectedStream
|
||||
if ([string]::IsNullOrWhiteSpace($path)) {
|
||||
throw "No log file found for service $Service stream $selectedStream"
|
||||
}
|
||||
|
||||
$paths += [pscustomobject]@{
|
||||
Stream = $selectedStream
|
||||
Path = $path
|
||||
}
|
||||
}
|
||||
$paths = @($paths)
|
||||
|
||||
for ($index = 0; $index -lt $paths.Count; $index++) {
|
||||
$entry = $paths[$index]
|
||||
Write-Host "==> $Service [$($entry.Stream)]"
|
||||
Write-Host "Path: $($entry.Path)"
|
||||
if ($Follow) {
|
||||
Get-Content -LiteralPath $entry.Path -Encoding UTF8 -Tail $Tail -Wait
|
||||
return
|
||||
}
|
||||
|
||||
Get-Content -LiteralPath $entry.Path -Encoding UTF8 -Tail $Tail
|
||||
if ($paths.Count -gt 1 -and $index -lt ($paths.Count - 1)) {
|
||||
Write-Host ""
|
||||
}
|
||||
}
|
||||
32
backend/scripts/dev-status.ps1
Normal file
32
backend/scripts/dev-status.ps1
Normal file
@@ -0,0 +1,32 @@
|
||||
[CmdletBinding()]
|
||||
param()
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
. "$PSScriptRoot\dev-common.ps1"
|
||||
|
||||
Initialize-DevState
|
||||
|
||||
$serviceRows = foreach ($service in (Get-BackendServiceDefinitions)) {
|
||||
$status = Get-ServiceStatus -Service $service
|
||||
[pscustomobject]@{
|
||||
Name = $service.Name
|
||||
Port = $service.Port
|
||||
Status = $status.Summary
|
||||
PID = $(if ($null -ne $status.Pid) { $status.Pid } else { "-" })
|
||||
}
|
||||
}
|
||||
|
||||
Write-Host "Backend service status:"
|
||||
$serviceRows | Format-Table -AutoSize
|
||||
|
||||
if (Get-Command docker -ErrorAction SilentlyContinue) {
|
||||
Write-Host ""
|
||||
Write-Host "Infrastructure status:"
|
||||
Get-InfrastructureStatus | Format-Table -AutoSize
|
||||
}
|
||||
else {
|
||||
Write-Host ""
|
||||
Write-Host "Infrastructure status: docker command not found"
|
||||
}
|
||||
27
backend/scripts/dev-up.ps1
Normal file
27
backend/scripts/dev-up.ps1
Normal file
@@ -0,0 +1,27 @@
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[switch]$SkipInfra
|
||||
)
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
. "$PSScriptRoot\dev-common.ps1"
|
||||
|
||||
Initialize-DevState
|
||||
Assert-ToolExists -Name "go"
|
||||
|
||||
if (-not $SkipInfra) {
|
||||
Write-Host "==> Start infrastructure and wait for health checks"
|
||||
Start-BackendInfrastructure
|
||||
}
|
||||
|
||||
$results = @()
|
||||
:serviceLoop foreach ($service in (Get-BackendServiceDefinitions)) {
|
||||
Write-Host "==> Process service: $($service.Name)"
|
||||
$results += Ensure-ServiceStarted -Service $service
|
||||
}
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "Backend start summary:"
|
||||
$results | Format-Table -AutoSize
|
||||
22
backend/scripts/service-restart.ps1
Normal file
22
backend/scripts/service-restart.ps1
Normal file
@@ -0,0 +1,22 @@
|
||||
[CmdletBinding()]
|
||||
param(
|
||||
[Parameter(Mandatory = $true)]
|
||||
[string]$Service
|
||||
)
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
. "$PSScriptRoot\dev-common.ps1"
|
||||
|
||||
Initialize-DevState
|
||||
Assert-ToolExists -Name "go"
|
||||
|
||||
$serviceDef = Get-BackendServiceDefinition -Name $Service
|
||||
|
||||
Write-Host "==> Restart service: $($serviceDef.Name)"
|
||||
$result = Restart-BackendService -Service $serviceDef
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "Service restart summary:"
|
||||
@($result) | Format-Table -AutoSize
|
||||
7
backend/scripts/services-down.ps1
Normal file
7
backend/scripts/services-down.ps1
Normal file
@@ -0,0 +1,7 @@
|
||||
[CmdletBinding()]
|
||||
param()
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
& (Join-Path $PSScriptRoot "dev-down.ps1")
|
||||
7
backend/scripts/services-up.ps1
Normal file
7
backend/scripts/services-up.ps1
Normal file
@@ -0,0 +1,7 @@
|
||||
[CmdletBinding()]
|
||||
param()
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
& (Join-Path $PSScriptRoot "dev-up.ps1") -SkipInfra
|
||||
@@ -1,164 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
// CourseService bundles the persistence handles and LLM dependencies used
// by course import and course-table image parsing.
type CourseService struct {
	// DAO dependencies injected by the constructor.
	courseDAO *dao.CourseDAO
	scheduleDAO *dao.ScheduleDAO
	// Ark Responses client plus its tuning config; nil client means the
	// image-parsing feature is not configured.
	courseImageResponsesClient *llmservice.ArkResponsesClient
	courseImageConfig CourseImageParseConfig
	courseImageModel string
}

// NewCourseService creates a CourseService instance with all dependencies
// wired in. The model name is trimmed once here so callers need not
// sanitize it.
func NewCourseService(
	courseDAO *dao.CourseDAO,
	scheduleDAO *dao.ScheduleDAO,
	courseImageResponsesClient *llmservice.ArkResponsesClient,
	courseImageConfig CourseImageParseConfig,
	courseImageModel string,
) *CourseService {
	return &CourseService{
		courseDAO: courseDAO,
		scheduleDAO: scheduleDAO,
		courseImageResponsesClient: courseImageResponsesClient,
		courseImageConfig: courseImageConfig,
		courseImageModel: strings.TrimSpace(courseImageModel),
	}
}
|
||||
|
||||
func isUniqueViolation(err error) bool {
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
// 兼容常见 MySQL / PostgreSQL / SQLite 的报错关键字
|
||||
// 也可以进一步精确到你的索引名 idx_user_slot_atomic
|
||||
msg := strings.ToLower(err.Error())
|
||||
if strings.Contains(msg, "duplicate entry") ||
|
||||
strings.Contains(msg, "unique constraint") ||
|
||||
strings.Contains(msg, "unique violation") ||
|
||||
strings.Contains(msg, "duplicate key") {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func CheckSingleCourse(req model.UserCheckCourseRequest) bool {
|
||||
for _, arrangement := range req.Arrangements {
|
||||
if arrangement.StartWeek > arrangement.EndWeek ||
|
||||
arrangement.DayOfWeek < 1 || arrangement.DayOfWeek > 7 ||
|
||||
arrangement.StartSection < 1 || arrangement.EndSection < arrangement.StartSection ||
|
||||
arrangement.EndSection > 12 || arrangement.StartWeek < 1 || arrangement.EndWeek > 24 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// AddUserCourses imports a batch of courses for userID.
// It validates each course, expands the arrangements into per-week
// ScheduleEvent rows plus per-section Schedule rows, rejects duplicates
// and conflicts, then inserts both tables inside a single transaction.
// On a conflict with a non-course event it returns the conflict details
// together with respond.ScheduleConflict.
func (ss *CourseService) AddUserCourses(ctx context.Context, req model.UserImportCoursesRequest, userID int) ([]model.ScheduleConflictDetail, error) {
	// 1. Validate the arrangements of every course up front.
	for _, course := range req.Courses {
		result := CheckSingleCourse(course)
		if !result {
			return nil, respond.WrongCourseInfo
		}
	}
	// 2. Expand the request into Schedule and ScheduleEvent slices.
	var finalSchedules []model.Schedule
	var finalScheduleEvents []model.ScheduleEvent
	// pos[i] maps finalSchedules[i] to the index of its owning event in
	// finalScheduleEvents, so event IDs can be wired up after insertion.
	var pos []int
	for _, course := range req.Courses {
		// Copy the field so we never take the address of a range-variable
		// field (which would alias across iterations).
		location := course.Location
		for _, arrangement := range course.Arrangements {
			weekType := arrangement.WeekType
			for week := arrangement.StartWeek; week <= arrangement.EndWeek; week++ {
				// Skip weeks excluded by odd/even-week scheduling.
				if weekType == "odd" && week%2 == 0 {
					continue
				}
				if weekType == "even" && week%2 != 0 {
					continue
				}
				// 2a. Build the ScheduleEvent for this concrete week.
				st, ed, err := conv.RelativeTimeToRealTime(week, arrangement.DayOfWeek, arrangement.StartSection, arrangement.EndSection)
				if err != nil {
					return nil, err
				}
				scheduleEvent := model.ScheduleEvent{
					UserID: userID,
					Name: course.CourseName,
					Location: &location,
					Type: "course",
					RelID: nil,
					CanBeEmbedded: course.IsAllowTasks,
					StartTime: st,
					EndTime: ed,
				}
				finalScheduleEvents = append(finalScheduleEvents, scheduleEvent)
				// 2b. Build one Schedule row per atomic section of the event.
				for section := arrangement.StartSection; section <= arrangement.EndSection; section++ {
					schedule := model.Schedule{
						Week: week,
						DayOfWeek: arrangement.DayOfWeek,
						Section: section,
						Status: "normal",
						UserID: userID,
						EventID: 0, // filled in after the events are inserted
					}
					finalSchedules = append(finalSchedules, schedule)
					pos = append(pos, len(finalScheduleEvents)-1)
				}
			}
		}
	}
	// 3. Reject a duplicate import: the same week/day/section already has a
	// course for this user.
	exists, err := ss.scheduleDAO.CheckScheduleConflict(ctx, finalSchedules)
	if err != nil {
		return nil, err
	}
	if exists {
		return nil, respond.InsertCourseTwice
	}
	// 4. Detect conflicts with non-course events in the same slots and
	// surface the concrete conflict details to the caller.
	conflicts, err := ss.scheduleDAO.GetNonCourseScheduleConflicts(ctx, finalSchedules)
	if err != nil {
		return nil, err
	}
	if len(conflicts) > 0 {
		ret := conv.SchedulesToScheduleConflictDetail(conflicts)
		return ret, respond.ScheduleConflict
	}
	// 5. Insert into both tables atomically: both succeed or both roll back.
	err = ss.courseDAO.Transaction(func(txDAO *dao.CourseDAO) error {
		ids, err := txDAO.AddUserCoursesIntoScheduleEvents(ctx, finalScheduleEvents)
		if err != nil {
			return err
		}
		// Wire each Schedule row to the generated ID of its ScheduleEvent.
		for i := range finalSchedules {
			finalSchedules[i].EventID = ids[pos[i]]
		}
		if err := txDAO.AddUserCoursesIntoSchedule(ctx, finalSchedules); err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		// A unique-key violation here means a concurrent duplicate import
		// slipped past the check in step 3.
		if isUniqueViolation(err) {
			return nil, respond.InsertCourseTwice
		}
		return nil, err
	}
	return nil, nil
}
|
||||
@@ -1,295 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
)
|
||||
|
||||
// Limits and tuning knobs for course-table image parsing.
const (
	// defaultCourseImageMaxBytes caps uploads at 5 MiB unless overridden.
	defaultCourseImageMaxBytes = 5 * 1024 * 1024
	// defaultCourseImageMaxTokens bounds the model's output size.
	defaultCourseImageMaxTokens = 16384
	// maxCourseImageDraftRows caps how many draft rows we keep per image.
	maxCourseImageDraftRows = 256
	// courseImageParseTemperature is kept low for deterministic extraction.
	courseImageParseTemperature = 0.1
)

// Sentinel errors surfaced to callers of the image-parse pipeline.
var (
	ErrCourseImageParserUnavailable = errors.New("course image parser is not configured")
	ErrCourseImageTooLarge = errors.New("course image is too large")
	ErrCourseImageUnsupportedMIME = errors.New("course image mime type is not supported")
	ErrCourseImageEmpty = errors.New("course image is empty")
)

// CourseImageParseConfig carries the runtime limits for image parsing.
type CourseImageParseConfig struct {
	// MaxImageBytes is the maximum accepted image payload size in bytes.
	MaxImageBytes int64
	// MaxTokens is the model's maximum output token budget.
	MaxTokens int
}
|
||||
|
||||
func NewCourseImageParseConfig(maxImageBytes int64, maxTokens int) CourseImageParseConfig {
|
||||
if maxImageBytes <= 0 {
|
||||
maxImageBytes = defaultCourseImageMaxBytes
|
||||
}
|
||||
if maxTokens <= 0 {
|
||||
maxTokens = defaultCourseImageMaxTokens
|
||||
}
|
||||
return CourseImageParseConfig{
|
||||
MaxImageBytes: maxImageBytes,
|
||||
MaxTokens: maxTokens,
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeCourseImageParseRequest(req model.CourseImageParseRequest, cfg CourseImageParseConfig) (*model.CourseImageParseRequest, error) {
|
||||
req.Filename = strings.TrimSpace(req.Filename)
|
||||
req.MIMEType = strings.TrimSpace(strings.ToLower(req.MIMEType))
|
||||
if len(req.ImageBytes) == 0 {
|
||||
return nil, ErrCourseImageEmpty
|
||||
}
|
||||
if int64(len(req.ImageBytes)) > cfg.MaxImageBytes {
|
||||
return nil, ErrCourseImageTooLarge
|
||||
}
|
||||
|
||||
detected := strings.ToLower(strings.TrimSpace(http.DetectContentType(req.ImageBytes)))
|
||||
if req.MIMEType == "" || req.MIMEType == "application/octet-stream" {
|
||||
req.MIMEType = detected
|
||||
}
|
||||
if !isSupportedCourseImageMIME(req.MIMEType) {
|
||||
if isSupportedCourseImageMIME(detected) {
|
||||
req.MIMEType = detected
|
||||
} else {
|
||||
return nil, ErrCourseImageUnsupportedMIME
|
||||
}
|
||||
}
|
||||
|
||||
if req.Filename == "" {
|
||||
req.Filename = "course-table"
|
||||
}
|
||||
return &req, nil
|
||||
}
|
||||
|
||||
// isSupportedCourseImageMIME reports whether the given MIME type (compared
// case-insensitively, ignoring surrounding whitespace) is one the
// course-table parser accepts.
func isSupportedCourseImageMIME(mimeType string) bool {
	normalized := strings.TrimSpace(strings.ToLower(mimeType))
	return normalized == "image/jpeg" ||
		normalized == "image/png" ||
		normalized == "image/webp"
}
|
||||
|
||||
// normalizeCourseImageParseResponse sanitizes a model-produced draft in
// place: it canonicalizes the status, message, warnings and rows, then
// enforces the per-status invariants (success/partial require validated
// rows; reject forces an empty row slice). A default user-facing message
// is supplied when the model returned none.
func normalizeCourseImageParseResponse(resp *model.CourseImageParseResponse) (*model.CourseImageParseResponse, error) {
	if resp == nil {
		return nil, errors.New("course image parse response is nil")
	}

	// Canonicalize every field before the status switch relies on them.
	resp.DraftStatus = model.CourseImageParseDraftStatus(strings.ToLower(strings.TrimSpace(string(resp.DraftStatus))))
	resp.Message = strings.TrimSpace(resp.Message)
	resp.Warnings = normalizeWarningList(resp.Warnings)
	resp.Rows = normalizeCourseImageParseRows(resp.Rows, &resp.Warnings)

	switch resp.DraftStatus {
	case model.CourseImageParseDraftStatusSuccess:
		// success: every row must be fully specified (strict validation).
		if len(resp.Rows) == 0 {
			return nil, errors.New("course image parse response has no rows in success status")
		}
		for idx := range resp.Rows {
			if err := validateCourseImageParseRow(&resp.Rows[idx], true); err != nil {
				return nil, fmt.Errorf("course image parse success row %d invalid: %w", idx+1, err)
			}
		}
	case model.CourseImageParseDraftStatusPartial:
		// partial: rows may have gaps, so validate leniently.
		if len(resp.Rows) == 0 {
			return nil, errors.New("course image parse response has no rows in partial status")
		}
		for idx := range resp.Rows {
			if err := validateCourseImageParseRow(&resp.Rows[idx], false); err != nil {
				return nil, fmt.Errorf("course image parse partial row %d invalid: %w", idx+1, err)
			}
		}
	case model.CourseImageParseDraftStatusReject:
		// reject: rows are meaningless; force an empty (non-nil) slice so
		// JSON encodes [] rather than null.
		resp.Rows = make([]model.CourseImageParseRow, 0)
	default:
		return nil, fmt.Errorf("unsupported draft_status: %s", resp.DraftStatus)
	}

	if resp.Message == "" {
		resp.Message = defaultCourseImageParseMessage(resp.DraftStatus, len(resp.Rows))
	}
	return resp, nil
}
|
||||
|
||||
// normalizeCourseImageParseRows cleans up draft rows: caps the row count,
// trims string fields, canonicalizes week_type, nils out non-positive
// optional ints, clamps confidence into [0, 1], and drops rows that carry
// no information at all. Truncation and dropped rows are reported through
// the shared warnings list. Always returns a non-nil slice.
func normalizeCourseImageParseRows(rows []model.CourseImageParseRow, warnings *[]string) []model.CourseImageParseRow {
	if len(rows) == 0 {
		return make([]model.CourseImageParseRow, 0)
	}
	if len(rows) > maxCourseImageDraftRows {
		// Hard cap keeps the review UI and payload size bounded.
		rows = rows[:maxCourseImageDraftRows]
		appendUniqueWarning(warnings, "识别结果行数超过上限,后端已截断为 256 行,请重点核对。")
	}

	normalized := make([]model.CourseImageParseRow, 0, len(rows))
	for idx := range rows {
		row := rows[idx]
		row.RowID = strings.TrimSpace(row.RowID)
		if row.RowID == "" {
			// Synthesize a stable ID so the frontend can reference the row.
			row.RowID = fmt.Sprintf("row_%03d", idx+1)
		}
		row.CourseName = strings.TrimSpace(row.CourseName)
		row.Location = strings.TrimSpace(row.Location)
		row.WeekType = normalizeCourseImageWeekType(row.WeekType)
		row.RawText = strings.TrimSpace(row.RawText)
		row.RowWarnings = normalizeWarningList(row.RowWarnings)
		// Non-positive numbers mean "unknown"; normalize them to nil.
		normalizeOptionalPositiveInt(&row.StartWeek)
		normalizeOptionalPositiveInt(&row.EndWeek)
		normalizeOptionalPositiveInt(&row.DayOfWeek)
		normalizeOptionalPositiveInt(&row.StartSection)
		normalizeOptionalPositiveInt(&row.EndSection)
		// Clamp confidence into the documented [0, 1] range.
		if row.Confidence < 0 {
			row.Confidence = 0
		}
		if row.Confidence > 1 {
			row.Confidence = 1
		}
		// A row with no name, no numbers, and no raw text carries nothing
		// worth reviewing; drop it and leave a note for the user.
		if row.CourseName == "" &&
			row.StartWeek == nil &&
			row.EndWeek == nil &&
			row.DayOfWeek == nil &&
			row.StartSection == nil &&
			row.EndSection == nil &&
			row.RawText == "" {
			appendUniqueWarning(warnings, fmt.Sprintf("存在空白草稿行,后端已自动忽略:%s", row.RowID))
			continue
		}
		normalized = append(normalized, row)
	}

	return normalized
}
|
||||
|
||||
func validateCourseImageParseRow(row *model.CourseImageParseRow, strict bool) error {
|
||||
if row == nil {
|
||||
return errors.New("row is nil")
|
||||
}
|
||||
if strict && row.CourseName == "" {
|
||||
return errors.New("course_name is empty")
|
||||
}
|
||||
if strict && row.WeekType == "" {
|
||||
return errors.New("week_type is empty")
|
||||
}
|
||||
if row.WeekType != "" && row.WeekType != "all" && row.WeekType != "odd" && row.WeekType != "even" {
|
||||
return fmt.Errorf("week_type is invalid: %s", row.WeekType)
|
||||
}
|
||||
|
||||
if err := validateOptionalCourseIntPair(row.StartWeek, row.EndWeek, 1, 24, "week", strict); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := validateOptionalCourseIntPair(row.StartSection, row.EndSection, 1, 12, "section", strict); err != nil {
|
||||
return err
|
||||
}
|
||||
if strict && row.DayOfWeek == nil {
|
||||
return errors.New("day_of_week is empty")
|
||||
}
|
||||
if row.DayOfWeek != nil && (*row.DayOfWeek < 1 || *row.DayOfWeek > 7) {
|
||||
return fmt.Errorf("day_of_week out of range: %d", *row.DayOfWeek)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateOptionalCourseIntPair validates an optional [start, end] range.
// In strict mode both bounds must be present. In non-strict mode the pair
// may be entirely absent, but a half-specified pair is always rejected.
// Present bounds must satisfy min <= *start <= *end <= max.
func validateOptionalCourseIntPair(start *int, end *int, min int, max int, field string, strict bool) error {
	switch {
	case start == nil && end == nil:
		if strict {
			return fmt.Errorf("%s range is incomplete", field)
		}
		return nil
	case start == nil || end == nil:
		return fmt.Errorf("%s range is incomplete", field)
	}
	if *start < min || *start > max {
		return fmt.Errorf("%s start out of range: %d", field, *start)
	}
	if *end < min || *end > max {
		return fmt.Errorf("%s end out of range: %d", field, *end)
	}
	if *start > *end {
		return fmt.Errorf("%s start is greater than end: %d > %d", field, *start, *end)
	}
	return nil
}
|
||||
|
||||
// normalizeOptionalPositiveInt clears an optional int in place when it is
// present but not strictly positive, so "0 or negative" reads as "unknown"
// downstream. A nil target or an already-nil value is a no-op.
func normalizeOptionalPositiveInt(target **int) {
	if target == nil {
		return
	}
	if value := *target; value != nil && *value <= 0 {
		*target = nil
	}
}
|
||||
|
||||
// normalizeCourseImageWeekType maps free-form week-type labels (including
// common Chinese ones) onto the canonical "all" / "odd" / "even" values.
// "unknown"-style labels collapse to the empty string; anything else
// passes through lowercased/trimmed so the validator can reject it with a
// useful message.
func normalizeCourseImageWeekType(raw string) string {
	normalized := strings.ToLower(strings.TrimSpace(raw))
	aliases := map[string]string{
		"":          "",
		"unknown":   "",
		"null":      "",
		"all":       "all",
		"every":     "all",
		"weekly":    "all",
		"each week": "all",
		"每周":        "all",
		"全周":        "all",
		"全部":        "all",
		"odd":       "odd",
		"single":    "odd",
		"单":         "odd",
		"单周":        "odd",
		"even":      "even",
		"double":    "even",
		"双":         "even",
		"双周":        "even",
	}
	if canonical, ok := aliases[normalized]; ok {
		return canonical
	}
	return normalized
}
|
||||
|
||||
// normalizeWarningList trims entries, drops blanks, and de-duplicates
// while preserving first-seen order. It always returns a non-nil slice so
// the JSON encoding is [] rather than null.
func normalizeWarningList(items []string) []string {
	result := make([]string, 0, len(items))
	if len(items) == 0 {
		return result
	}
	seen := make(map[string]struct{}, len(items))
	for _, raw := range items {
		warning := strings.TrimSpace(raw)
		if warning == "" {
			continue
		}
		if _, dup := seen[warning]; dup {
			continue
		}
		seen[warning] = struct{}{}
		result = append(result, warning)
	}
	return result
}
|
||||
|
||||
// appendUniqueWarning appends warningText (trimmed) to *target unless it
// is blank or an equivalent trimmed entry is already present. A nil target
// is a no-op.
func appendUniqueWarning(target *[]string, warningText string) {
	if target == nil {
		return
	}
	candidate := strings.TrimSpace(warningText)
	if candidate == "" {
		return
	}
	duplicate := false
	for _, existing := range *target {
		if strings.TrimSpace(existing) == candidate {
			duplicate = true
			break
		}
	}
	if !duplicate {
		*target = append(*target, candidate)
	}
}
|
||||
|
||||
func defaultCourseImageParseMessage(status model.CourseImageParseDraftStatus, rowCount int) string {
|
||||
switch status {
|
||||
case model.CourseImageParseDraftStatusSuccess:
|
||||
return fmt.Sprintf("已识别 %d 条课程安排,请重点核对周次、星期和节次。", rowCount)
|
||||
case model.CourseImageParseDraftStatusPartial:
|
||||
return fmt.Sprintf("已识别 %d 条课程安排,但仍存在不确定字段,请结合 warning 逐项核对。", rowCount)
|
||||
case model.CourseImageParseDraftStatusReject:
|
||||
return "图片信息不足,建议重新上传完整、清晰、包含表头和节次栏的总课表截图。"
|
||||
default:
|
||||
return "课程表图片识别已完成,请人工核对后再导入。"
|
||||
}
|
||||
}
|
||||
@@ -1,228 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
llmservice "github.com/LoveLosita/smartflow/backend/services/llm"
|
||||
)
|
||||
|
||||
// ParseCourseTableImage 使用 Ark SDK Responses 解析课程表图片。
|
||||
func (ss *CourseService) ParseCourseTableImage(ctx context.Context, req model.CourseImageParseRequest) (*model.CourseImageParseResponse, error) {
|
||||
if ss == nil || ss.courseImageResponsesClient == nil {
|
||||
modelName := ""
|
||||
if ss != nil {
|
||||
modelName = ss.courseImageModel
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] parser unavailable model_name=%q filename=%q mime=%q bytes=%d",
|
||||
modelName,
|
||||
req.Filename,
|
||||
req.MIMEType,
|
||||
len(req.ImageBytes),
|
||||
)
|
||||
return nil, ErrCourseImageParserUnavailable
|
||||
}
|
||||
|
||||
normalizedReq, err := normalizeCourseImageParseRequest(req, ss.courseImageConfig)
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] request normalization failed filename=%q mime=%q bytes=%d err=%v",
|
||||
req.Filename,
|
||||
req.MIMEType,
|
||||
len(req.ImageBytes),
|
||||
err,
|
||||
)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] normalized request model_name=%q filename=%q mime=%q bytes=%d max_bytes=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
len(normalizedReq.ImageBytes),
|
||||
ss.courseImageConfig.MaxImageBytes,
|
||||
)
|
||||
|
||||
messages, base64Chars, promptChars := buildCourseImageParseResponsesMessages(normalizedReq)
|
||||
startAt := time.Now()
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke start model_name=%q filename=%q mime=%q message_count=%d base64_chars=%d prompt_chars=%d payload_chars_estimate=%d thinking=%s temperature=%.2f max_output_tokens=%d text_format=%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
len(messages),
|
||||
base64Chars,
|
||||
promptChars,
|
||||
base64Chars+promptChars+len(strings.TrimSpace(courseImageParseSystemPrompt)),
|
||||
llmservice.ThinkingModeDisabled,
|
||||
courseImageParseTemperature,
|
||||
ss.courseImageConfig.MaxTokens,
|
||||
"json_object",
|
||||
)
|
||||
|
||||
// 1. 课程表图片识别输出体量大,显式透传 max_output_tokens,避免被默认值截断。
|
||||
// 2. text_format 固定为 json_object,降低输出混入解释文本导致解析失败的概率。
|
||||
// 3. thinking 显式关闭,优先保证课程导入链路稳定性。
|
||||
draft, rawResult, err := llmservice.GenerateArkResponsesJSON[model.CourseImageParseResponse](ctx, ss.courseImageResponsesClient, messages, llmservice.ArkResponsesOptions{
|
||||
Temperature: courseImageParseTemperature,
|
||||
MaxOutputTokens: ss.courseImageConfig.MaxTokens,
|
||||
Thinking: llmservice.ThinkingModeDisabled,
|
||||
TextFormat: "json_object",
|
||||
})
|
||||
if err != nil {
|
||||
rawText := ""
|
||||
rawChars := 0
|
||||
status := ""
|
||||
incompleteReason := ""
|
||||
errorCode := ""
|
||||
errorMessage := ""
|
||||
inputTokens := int64(0)
|
||||
outputTokens := int64(0)
|
||||
totalTokens := int64(0)
|
||||
if rawResult != nil {
|
||||
rawText = strings.TrimSpace(rawResult.Text)
|
||||
rawChars = len(rawText)
|
||||
status = strings.TrimSpace(rawResult.Status)
|
||||
incompleteReason = strings.TrimSpace(rawResult.IncompleteReason)
|
||||
errorCode = strings.TrimSpace(rawResult.ErrorCode)
|
||||
errorMessage = strings.TrimSpace(rawResult.ErrorMessage)
|
||||
if rawResult.Usage != nil {
|
||||
inputTokens = rawResult.Usage.InputTokens
|
||||
outputTokens = rawResult.Usage.OutputTokens
|
||||
totalTokens = rawResult.Usage.TotalTokens
|
||||
}
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke failed model_name=%q filename=%q mime=%q cost_ms=%d err=%v status=%q incomplete_reason=%q error_code=%q error_message=%q input_tokens=%d output_tokens=%d total_tokens=%d raw_chars=%d raw_full=\n%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
time.Since(startAt).Milliseconds(),
|
||||
err,
|
||||
status,
|
||||
incompleteReason,
|
||||
errorCode,
|
||||
errorMessage,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
totalTokens,
|
||||
rawChars,
|
||||
rawText,
|
||||
)
|
||||
if isCourseImageOutputTruncated(rawResult) {
|
||||
return nil, fmt.Errorf(
|
||||
"课程表识别输出疑似被 max_output_tokens 截断:status=%s incomplete_reason=%s output_tokens=%d max_output_tokens=%d",
|
||||
status,
|
||||
incompleteReason,
|
||||
outputTokens,
|
||||
ss.courseImageConfig.MaxTokens,
|
||||
)
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rawText := ""
|
||||
rawChars := 0
|
||||
status := ""
|
||||
incompleteReason := ""
|
||||
errorCode := ""
|
||||
errorMessage := ""
|
||||
inputTokens := int64(0)
|
||||
outputTokens := int64(0)
|
||||
totalTokens := int64(0)
|
||||
if rawResult != nil {
|
||||
rawText = strings.TrimSpace(rawResult.Text)
|
||||
rawChars = len(rawText)
|
||||
status = strings.TrimSpace(rawResult.Status)
|
||||
incompleteReason = strings.TrimSpace(rawResult.IncompleteReason)
|
||||
errorCode = strings.TrimSpace(rawResult.ErrorCode)
|
||||
errorMessage = strings.TrimSpace(rawResult.ErrorMessage)
|
||||
if rawResult.Usage != nil {
|
||||
inputTokens = rawResult.Usage.InputTokens
|
||||
outputTokens = rawResult.Usage.OutputTokens
|
||||
totalTokens = rawResult.Usage.TotalTokens
|
||||
}
|
||||
}
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] model invoke success model_name=%q filename=%q mime=%q cost_ms=%d status=%q incomplete_reason=%q error_code=%q error_message=%q input_tokens=%d output_tokens=%d total_tokens=%d raw_chars=%d raw_full=\n%s",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedReq.MIMEType,
|
||||
time.Since(startAt).Milliseconds(),
|
||||
status,
|
||||
incompleteReason,
|
||||
errorCode,
|
||||
errorMessage,
|
||||
inputTokens,
|
||||
outputTokens,
|
||||
totalTokens,
|
||||
rawChars,
|
||||
rawText,
|
||||
)
|
||||
|
||||
normalizedDraft, err := normalizeCourseImageParseResponse(draft)
|
||||
if err != nil {
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] draft normalization failed model_name=%q filename=%q err=%v draft_status=%v row_count=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
err,
|
||||
draft.DraftStatus,
|
||||
len(draft.Rows),
|
||||
)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Printf(
|
||||
"[COURSE_PARSE][SERVICE] draft normalization success model_name=%q filename=%q draft_status=%s rows=%d warnings=%d",
|
||||
ss.courseImageModel,
|
||||
normalizedReq.Filename,
|
||||
normalizedDraft.DraftStatus,
|
||||
len(normalizedDraft.Rows),
|
||||
len(normalizedDraft.Warnings),
|
||||
)
|
||||
|
||||
return normalizedDraft, nil
|
||||
}
|
||||
|
||||
func buildCourseImageParseResponsesMessages(req *model.CourseImageParseRequest) ([]llmservice.ArkResponsesMessage, int, int) {
|
||||
userPrompt := fmt.Sprintf(courseImageParseUserPromptTemplate, req.Filename, req.MIMEType)
|
||||
base64Data := base64.StdEncoding.EncodeToString(req.ImageBytes)
|
||||
imageDataURL := fmt.Sprintf("data:%s;base64,%s", req.MIMEType, base64Data)
|
||||
|
||||
messages := []llmservice.ArkResponsesMessage{
|
||||
{
|
||||
Role: "system",
|
||||
Text: strings.TrimSpace(courseImageParseSystemPrompt),
|
||||
},
|
||||
{
|
||||
Role: "user",
|
||||
Text: strings.TrimSpace(userPrompt),
|
||||
ImageURL: imageDataURL,
|
||||
ImageDetail: "high",
|
||||
},
|
||||
}
|
||||
return messages, len(base64Data), len(strings.TrimSpace(userPrompt))
|
||||
}
|
||||
|
||||
func isCourseImageOutputTruncated(rawResult *llmservice.ArkResponsesResult) bool {
|
||||
if rawResult == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
reason := strings.ToLower(strings.TrimSpace(rawResult.IncompleteReason))
|
||||
if strings.Contains(reason, "max_output_tokens") ||
|
||||
strings.Contains(reason, "max_tokens") ||
|
||||
strings.Contains(reason, "length") {
|
||||
return true
|
||||
}
|
||||
|
||||
return strings.EqualFold(strings.TrimSpace(rawResult.Status), "incomplete") && reason == ""
|
||||
}
|
||||
@@ -1,59 +0,0 @@
|
||||
package service
|
||||
|
||||
// courseImageParseSystemPrompt is the system prompt for the course-table
// image recognizer. It pins the exact top-level JSON schema, the allowed
// draft_status / week_type values, and the "don't fabricate, prefer
// partial/reject" policy. The prompt body is runtime data and must not be
// reworded casually — the response normalizer depends on this contract.
const courseImageParseSystemPrompt = `
你是 SmartFlow 的“总课表图片识别器”。你的唯一任务是读取用户上传的总课表图片,输出结构化 JSON 草稿,供前端人工核对后再导入系统。

必须遵守以下规则:
1. 只能输出一个 JSON 对象,禁止输出 Markdown、代码块、解释文字或额外前后缀。
2. 顶层 JSON 结构必须是:
{
"draft_status": "success | partial | reject",
"message": "字符串",
"warnings": ["字符串"],
"rows": [
{
"row_id": "字符串,可为空",
"course_name": "字符串",
"location": "字符串",
"is_allow_tasks": false,
"start_week": 1,
"end_week": 16,
"day_of_week": 1,
"start_section": 1,
"end_section": 2,
"week_type": "all | odd | even",
"confidence": 0.92,
"raw_text": "原图中对应的近似文本",
"row_warnings": ["字符串"]
}
]
}
3. rows 中一行只表达一个“课程安排片段”,不要把同一门课的多个时间段强行合并成一行。
4. is_allow_tasks 无法从课表图片稳定识别时,一律返回 false,不要自行猜测。
5. 若图片完整且大部分字段明确,可返回 success。
6. 若图片可识别出部分行,但存在裁切、模糊、遮挡、单双周不清晰、节次/周次不确定等问题,返回 partial。
7. 若图片严重不完整、分辨率过低、主体不是课表、无法可靠识别,返回 reject,同时 rows 置为空数组。
8. 不要编造信息。看不清的数值字段请返回 null,并在 row_warnings 或 warnings 中明确说明原因。
9. week_type 只能是:
- all:每周/未标注单双周
- odd:单周
- even:双周
10. day_of_week 使用 1-7 表示周一到周日。
11. start_section/end_section 使用原子节次编号,例如 1-2 节应输出 start_section=1, end_section=2。
12. confidence 取 0 到 1 之间的小数;不确定时可以偏保守。
13. 如果 rows 不为空,优先保证“周次、星期、节次”准确,地点可为空字符串。
14. 当图片信息不足时,应明确拒绝或降级为 partial,而不是强行补全。
15. 填写json中course_name时,严格按照截图的课程名称来。例如,有的课可能既有本体,又有实验课,这算是两门不同的课。
16. 周信息是可能出现中断的,例如一节课可能是第1周和第6-12周,这是正常的课程安排,请不要擅自更改。
`

// courseImageParseUserPromptTemplate is the per-request user prompt.
// fmt.Sprintf fills the two %s verbs with the filename and MIME type
// (see buildCourseImageParseResponsesMessages).
const courseImageParseUserPromptTemplate = `
请识别这张总课表图片,并严格按照约定 JSON 输出草稿。

补充约束:
1. 文件名:%s
2. MIME 类型:%s
3. 这是一张供学生核对的“导入草稿”,不是最终真值;不确定就留空或写 warning。
4. 如果图片右侧、底部、表头、周次栏、节次栏有缺失,请优先返回 partial 或 reject。
5. rows 里尽量保留 raw_text,方便前端逐行回显核对。
`
|
||||
@@ -1,126 +0,0 @@
|
||||
package events
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
kafkabus "github.com/LoveLosita/smartflow/backend/infra/kafka"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
contracts "github.com/LoveLosita/smartflow/backend/shared/contracts/userauth"
|
||||
"github.com/LoveLosita/smartflow/backend/shared/ports"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const (
	// EventTypeChatTokenUsageAdjustRequested is the "conversation token
	// quota adjustment" event type.
	// Naming constraints:
	//  1. expresses business semantics only; no outbox/kafka details leak;
	//  2. kept stable as a long-lived routing key — future evolution goes
	//     through event_version rather than renaming.
	EventTypeChatTokenUsageAdjustRequested = "chat.token.usage.adjust.requested"
)
|
||||
|
||||
// RegisterChatTokenUsageAdjustHandler 注册“会话 token 额度调整”消费者。
|
||||
// 职责边界:
|
||||
// 1. 只处理 token 调整事件,不处理聊天正文落库;
|
||||
// 2. 先写本地账本,再调用 userauth 侧做额度同步;
|
||||
// 3. 非法载荷直接标记 dead,避免无意义重试。
|
||||
func RegisterChatTokenUsageAdjustHandler(
|
||||
bus OutboxBus,
|
||||
outboxRepo *outboxinfra.Repository,
|
||||
repoManager *dao.RepoManager,
|
||||
adjuster ports.TokenUsageAdjuster,
|
||||
) error {
|
||||
if bus == nil {
|
||||
return errors.New("event bus is nil")
|
||||
}
|
||||
if outboxRepo == nil {
|
||||
return errors.New("outbox repository is nil")
|
||||
}
|
||||
if repoManager == nil {
|
||||
return errors.New("repo manager is nil")
|
||||
}
|
||||
|
||||
eventOutboxRepo, err := scopedOutboxRepoForEvent(outboxRepo, EventTypeChatTokenUsageAdjustRequested)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
handler := func(ctx context.Context, envelope kafkabus.Envelope) error {
|
||||
var payload model.ChatTokenUsageAdjustPayload
|
||||
if unmarshalErr := json.Unmarshal(envelope.Payload, &payload); unmarshalErr != nil {
|
||||
_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "解析会话 token 调整载荷失败: "+unmarshalErr.Error())
|
||||
return nil
|
||||
}
|
||||
|
||||
if payload.UserID <= 0 || payload.TokensDelta <= 0 || payload.ConversationID == "" {
|
||||
_ = eventOutboxRepo.MarkDead(ctx, envelope.OutboxID, "会话 token 调整载荷无效: user_id/conversation_id/tokens_delta 非法")
|
||||
return nil
|
||||
}
|
||||
|
||||
eventID := strings.TrimSpace(envelope.EventID)
|
||||
if eventID == "" {
|
||||
eventID = strconv.FormatInt(envelope.OutboxID, 10)
|
||||
}
|
||||
|
||||
if err := eventOutboxRepo.ConsumeInTx(ctx, envelope.OutboxID, func(tx *gorm.DB) error {
|
||||
txM := repoManager.WithTx(tx)
|
||||
return txM.Agent.AdjustTokenUsageInTx(ctx, payload.UserID, payload.ConversationID, payload.TokensDelta, eventID)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if adjuster == nil {
|
||||
return errors.New("userauth token adjuster is nil")
|
||||
}
|
||||
if _, err := adjuster.AdjustTokenUsage(ctx, contracts.AdjustTokenUsageRequest{
|
||||
EventID: eventID,
|
||||
UserID: payload.UserID,
|
||||
TokenDelta: payload.TokensDelta,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return eventOutboxRepo.MarkConsumed(ctx, envelope.OutboxID)
|
||||
}
|
||||
|
||||
return bus.RegisterEventHandler(EventTypeChatTokenUsageAdjustRequested, handler)
|
||||
}
|
||||
|
||||
// PublishChatTokenUsageAdjustRequested 发布“会话 token 额度调整”事件。
|
||||
// 1. 这里只保证 outbox 写入成功,不等待消费结果;
|
||||
// 2. 业务层只关心 DTO,不关心 outbox/Kafka 细节。
|
||||
func PublishChatTokenUsageAdjustRequested(
|
||||
ctx context.Context,
|
||||
publisher outboxinfra.EventPublisher,
|
||||
payload model.ChatTokenUsageAdjustPayload,
|
||||
) error {
|
||||
if publisher == nil {
|
||||
return errors.New("event publisher is nil")
|
||||
}
|
||||
if payload.UserID <= 0 {
|
||||
return errors.New("invalid user_id")
|
||||
}
|
||||
if payload.TokensDelta <= 0 {
|
||||
return errors.New("invalid tokens_delta")
|
||||
}
|
||||
if payload.ConversationID == "" {
|
||||
return errors.New("invalid conversation_id")
|
||||
}
|
||||
if payload.TriggeredAt.IsZero() {
|
||||
payload.TriggeredAt = time.Now()
|
||||
}
|
||||
|
||||
return publisher.Publish(ctx, outboxinfra.PublishRequest{
|
||||
EventType: EventTypeChatTokenUsageAdjustRequested,
|
||||
EventVersion: outboxinfra.DefaultEventVersion,
|
||||
MessageKey: payload.ConversationID,
|
||||
AggregateID: strconv.Itoa(payload.UserID) + ":" + payload.ConversationID,
|
||||
Payload: payload,
|
||||
})
|
||||
}
|
||||
@@ -1,866 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/logic"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/go-redis/redis/v8"
|
||||
)
|
||||
|
||||
// ScheduleService serves user schedule queries, combining database access
// with a Redis read-through cache.
type ScheduleService struct {
	scheduleDAO *dao.ScheduleDAO
	taskClassDAO *dao.TaskClassDAO
	repoManager *dao.RepoManager // coordinates transactions across multiple DAOs
	cacheDAO *dao.CacheDAO // Redis cache used by the schedule queries
}

// NewScheduleService wires a ScheduleService with its DAO dependencies.
func NewScheduleService(scheduleDAO *dao.ScheduleDAO, taskClassDAO *dao.TaskClassDAO, repoManager *dao.RepoManager, cacheDAO *dao.CacheDAO) *ScheduleService {
	return &ScheduleService{
		scheduleDAO: scheduleDAO,
		taskClassDAO: taskClassDAO,
		repoManager: repoManager,
		cacheDAO: cacheDAO,
	}
}
|
||||
|
||||
func (ss *ScheduleService) GetUserTodaySchedule(ctx context.Context, userID int) ([]model.UserTodaySchedule, error) {
|
||||
//1.先尝试从缓存获取数据
|
||||
cachedResp, err := ss.cacheDAO.GetUserTodayScheduleFromCache(ctx, userID)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//2.获取当前日期
|
||||
/*curTime := time.Now().Format("2006-01-02")*/
|
||||
curTime := "2026-03-02" //测试数据
|
||||
week, dayOfWeek, err := conv.RealDateToRelativeDate(curTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.查询用户当天的日程安排
|
||||
schedules, err := ss.scheduleDAO.GetUserTodaySchedule(ctx, userID, week, dayOfWeek) //测试数据
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//4.转换为前端需要的格式
|
||||
todaySchedules := conv.SchedulesToUserTodaySchedule(schedules)
|
||||
//5.将查询结果存入缓存,设置过期时间为当天结束
|
||||
err = ss.cacheDAO.SetUserTodayScheduleToCache(ctx, userID, todaySchedules)
|
||||
return todaySchedules, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserWeeklySchedule(ctx context.Context, userID, week int) (*model.UserWeekSchedule, error) {
|
||||
//1.先检查 week 参数是否合法
|
||||
if week < 0 || week > 25 {
|
||||
return nil, respond.WeekOutOfRange
|
||||
}
|
||||
//2.先看看缓存里有没有数据(如果有的话直接返回,没有的话继续查库)
|
||||
cachedResp, err := ss.cacheDAO.GetUserWeeklyScheduleFromCache(ctx, userID, week)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//3.查询用户每周的日程安排
|
||||
//如果没有传入 week 参数,则默认查询当前周的日程安排
|
||||
if week == 0 {
|
||||
curTime := time.Now().Format("2006-01-02")
|
||||
var err error
|
||||
week, _, err = conv.RealDateToRelativeDate(curTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
schedules, err := ss.scheduleDAO.GetUserWeeklySchedule(ctx, userID, week)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.转换为前端需要的格式
|
||||
weeklySchedule := conv.SchedulesToUserWeeklySchedule(schedules)
|
||||
weeklySchedule.Week = week
|
||||
//4.将查询结果存入缓存,设置过期时间为一周(或者根据实际情况调整)
|
||||
err = ss.cacheDAO.SetUserWeeklyScheduleToCache(ctx, userID, weeklySchedule)
|
||||
return weeklySchedule, nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) DeleteScheduleEvent(ctx context.Context, requests []model.UserDeleteScheduleEvent, userID int) error {
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
for _, req := range requests {
|
||||
//1.如果要删课程和嵌入的事件
|
||||
if req.DeleteEmbeddedTask && req.DeleteCourse {
|
||||
//通过schedule表的embedded_task_id字段找到对应的task_id
|
||||
taskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
if taskID != 0 {
|
||||
err = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
//再删除课程事件和嵌入的事件(通过级联删除实现)
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
//2.只删课程/事件
|
||||
if req.DeleteCourse {
|
||||
//2.1.检查课程是否有嵌入的任务事件
|
||||
exists, err := txM.Schedule.IfScheduleEventIDExists(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !exists {
|
||||
return respond.WrongScheduleEventID
|
||||
}
|
||||
embeddedTaskID, err := txM.Schedule.GetScheduleEmbeddedTaskID(ctx, req.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//2.2.如果有,则需另外为其创建新的scheduleEvent(type=task)
|
||||
//课程事件先删除后再创建任务事件
|
||||
if embeddedTaskID != 0 {
|
||||
//2.2.1.先通过id取出taskClassItem详情
|
||||
taskClassItem, err := txM.TaskClass.GetTaskClassItemByID(ctx, embeddedTaskID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//下方开启事务,删除课程事件并创建新的任务事件
|
||||
//2.2.2.删除课程事件
|
||||
txErr := txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//2.2.3.再复用代码创建新的scheduleEvent,下方代码改编自AddTaskClassItemIntoSchedule函数
|
||||
//直接构造Schedule模型
|
||||
sections := make([]int, 0, taskClassItem.EmbeddedTime.SectionTo-taskClassItem.EmbeddedTime.SectionFrom+1)
|
||||
// 这里的 req 主要是为了传递 Week 和 DayOfWeek,其他字段不需要了
|
||||
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(
|
||||
&model.UserInsertTaskClassItemToScheduleRequest{
|
||||
Week: taskClassItem.EmbeddedTime.Week,
|
||||
DayOfWeek: taskClassItem.EmbeddedTime.DayOfWeek},
|
||||
taskClassItem, nil, userID, taskClassItem.EmbeddedTime.SectionFrom, taskClassItem.EmbeddedTime.SectionTo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//将节次区间转换为节次切片,方便后续检查冲突
|
||||
for section := taskClassItem.EmbeddedTime.SectionFrom; section <= taskClassItem.EmbeddedTime.SectionTo; section++ {
|
||||
sections = append(sections, section)
|
||||
}
|
||||
//单用户不存在删除时这个格子被占用的情况,所以不检查冲突了
|
||||
/*//4.1 统一检查冲突(避免逐条查库)
|
||||
conflict, err := ss.scheduleDAO.HasUserScheduleConflict(ctx, userID, req.Week, req.DayOfWeek, sections)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if conflict {
|
||||
return respond.ScheduleConflict
|
||||
}*/
|
||||
// 5. 写入数据库(通过 RepoManager 统一管理事务)
|
||||
// 这里的 sv.daoManager 是你在初始化 Service 时注入的全局 RepoManager 实例
|
||||
// 5.1 使用事务中的 ScheduleRepo 插入 Event
|
||||
eventID, txErr := txM.Schedule.AddScheduleEvent(scheduleEvent)
|
||||
if txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
// 5.2 关联 ID(纯内存操作,无需 tx)
|
||||
for i := range schedules {
|
||||
schedules[i].EventID = eventID
|
||||
}
|
||||
// 5.3 使用事务中的 ScheduleRepo 批量插入原子槽位
|
||||
if _, txErr = txM.Schedule.AddSchedules(schedules); txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
// 5.4 使用事务中的 TaskRepo 更新任务状态
|
||||
if txErr = txM.TaskClass.UpdateTaskClassItemEmbeddedTime(ctx, embeddedTaskID, taskClassItem.EmbeddedTime); txErr != nil {
|
||||
return txErr // 触发回滚
|
||||
}
|
||||
continue
|
||||
}
|
||||
//2.3.如果没有嵌入的事件,就直接删除课程事件
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, req.ID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//先通过rel_id找到对应的task_id
|
||||
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, req.ID)
|
||||
if txErr != nil {
|
||||
return err
|
||||
}
|
||||
//2.4.如果是任务块,转而去清除task_items表中的嵌入时间
|
||||
if taskID != 0 {
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
//3.只删嵌入的事件
|
||||
if req.DeleteEmbeddedTask {
|
||||
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, req.ID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.2.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) GetUserRecentCompletedSchedules(ctx context.Context, userID, index, limit int) (*model.UserRecentCompletedScheduleResponse, error) {
|
||||
//1.先查缓存
|
||||
cachedResp, err := ss.cacheDAO.GetUserRecentCompletedSchedulesFromCache(ctx, userID, index, limit)
|
||||
if err == nil {
|
||||
// 缓存命中,直接返回
|
||||
return cachedResp, nil
|
||||
}
|
||||
// 如果是 redis.Nil 错误,说明缓存未命中,我们继续查库
|
||||
if !errors.Is(err, redis.Nil) {
|
||||
return nil, err
|
||||
}
|
||||
//2.查询用户最近完成的日程安排
|
||||
//获取现在的时间
|
||||
/*nowTime := time.Now()*/
|
||||
nowTime := time.Date(2026, 6, 30, 12, 0, 0, 0, time.Local) //测试数据
|
||||
schedules, err := ss.scheduleDAO.GetUserRecentCompletedSchedules(ctx, nowTime, userID, index, limit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//3.转换为前端需要的格式
|
||||
result := conv.SchedulesToRecentCompletedSchedules(schedules)
|
||||
//4.将查询结果存入缓存,设置过期时间为30分钟(根据实际情况调整)
|
||||
err = ss.cacheDAO.SetUserRecentCompletedSchedulesToCache(ctx, userID, index, limit, result)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetUserOngoingSchedule returns the user's currently ongoing (or next
// upcoming) schedule entry, cache-first. A nil result is itself cacheable and
// surfaces as respond.NoOngoingOrUpcomingSchedule.
func (ss *ScheduleService) GetUserOngoingSchedule(ctx context.Context, userID int) (*model.OngoingSchedule, error) {
	//1. Try the cache first.
	cachedResp, err := ss.cacheDAO.GetUserOngoingScheduleFromCache(ctx, userID)
	if err == nil && cachedResp == nil {
		// A cached "nothing ongoing" marker: return the sentinel error directly.
		return nil, respond.NoOngoingOrUpcomingSchedule
	}
	if err == nil {
		// Cache hit: return immediately.
		return cachedResp, nil
	}
	// redis.Nil means a cache miss; anything else is a real error.
	if !errors.Is(err, redis.Nil) {
		return nil, err
	}
	//2. Query the ongoing schedule from the database.
	/*nowTime := time.Now()*/
	nowTime := time.Date(2026, 6, 30, 18, 50, 0, 0, time.Local) // test data — TODO restore time.Now()
	schedules, err := ss.scheduleDAO.GetUserOngoingSchedule(ctx, userID, nowTime)
	if err != nil {
		return nil, err
	}
	//3. Convert to the response shape and tag whether it has started yet.
	result := conv.SchedulesToUserOngoingSchedule(schedules)
	if result != nil {
		if result.StartTime.After(nowTime) {
			result.TimeStatus = "upcoming"
		} else {
			result.TimeStatus = "ongoing"
		}
	}
	//4. Cache the result — including nil, so repeated "nothing ongoing"
	//   lookups do not hit the database. TTL lasts until the entry ends.
	err = ss.cacheDAO.SetUserOngoingScheduleToCache(ctx, userID, result)
	if err != nil {
		return nil, err
	}
	if result == nil {
		// Nothing ongoing or upcoming: return the sentinel error.
		return nil, respond.NoOngoingOrUpcomingSchedule
	}
	return result, nil
}
|
||||
|
||||
func (ss *ScheduleService) RevocateUserTaskClassItem(ctx context.Context, userID, eventID int) error {
|
||||
//1.先查库,看看这个event是任务事件还是课程事件,以及判断它是否属于用户
|
||||
eventType, err := ss.scheduleDAO.GetScheduleTypeByEventID(ctx, eventID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//2.根据查询结果进行不同的撤销操作
|
||||
if eventType == "course" {
|
||||
//下面开启事务,撤销嵌入事件
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//下面先设置schedule表的embedded_task_id字段为null,再设置task_items表的embedded_time字段为null,实现删除嵌入事件的效果
|
||||
//3.1.先将schedule表的embedded_task_id字段设置为null
|
||||
taskID, txErr := txM.Schedule.SetScheduleEmbeddedTaskIDToNull(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.2.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//3.3.最后设置task_items表的status字段为已撤销
|
||||
txErr = txM.Schedule.RevocateSchedulesByEventID(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if eventType == "task" {
|
||||
//下面开启事务,撤销任务事件
|
||||
err := ss.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//先通过rel_id找到对应的task_id
|
||||
taskID, txErr := txM.Schedule.GetRelIDByScheduleEventID(ctx, eventID)
|
||||
if txErr != nil {
|
||||
return err
|
||||
}
|
||||
//再将task_items表中对应的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//最后将其从日程表中删除(通过级联删除实现)
|
||||
err = txM.Schedule.DeleteScheduleEventAndSchedule(ctx, eventID, userID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
log.Println("ScheduleService.RevocateUserTaskClassItem: eventType is neither embedded_task nor task, something must be wrong")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ss *ScheduleService) SmartPlanning(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, error) {
|
||||
//1.通过任务类id获取任务类详情
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//2.校验任务类的参数是否合法
|
||||
if taskClass == nil {
|
||||
return nil, respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
//3.获取任务类安排的时间范围内的全部周数信息(左右边界不足一周的情况也要算作一周)
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//4.将多个周的信息传入智能排课算法,获取推荐的时间安排(周+周内的天+节次)
|
||||
result, err := logic.SmartPlanningMainLogic(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//5.将推荐的时间安排转换为前端需要的格式返回
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// SmartPlanningRaw 执行粗排算法并同时返回展示结构和已分配的任务项。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 与 SmartPlanning 共享完全相同的前置校验和粗排逻辑;
|
||||
// 2. 额外返回 allocatedItems(每项的 EmbeddedTime 已由算法回填),
|
||||
// 供 Agent 排程链路直接转换为 BatchApplyPlans 请求,无需再让模型"二次分配"。
|
||||
func (ss *ScheduleService) SmartPlanningRaw(ctx context.Context, userID, taskClassID int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||
// 1. 获取任务类详情。
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if taskClass == nil {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return nil, nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
|
||||
// 2. 获取时间范围内的全部日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(ctx, userID, conv.CalculateFirstDayOfWeek(*taskClass.StartDate), conv.CalculateLastDayOfWeek(*taskClass.EndDate))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 执行粗排算法,拿到已分配的 items(EmbeddedTime 已回填)。
|
||||
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 同时生成展示结构,供 SSE 阶段推送给前端预览。
|
||||
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||
return displayResult, allocatedItems, nil
|
||||
}
|
||||
|
||||
// SmartPlanningMulti 执行“多任务类智能粗排”,仅返回前端展示结构。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责把多任务类请求收口到统一粗排流程;
|
||||
// 2. 负责返回展示结构;
|
||||
// 3. 不返回底层分配细节(由 SmartPlanningMultiRaw 提供)。
|
||||
func (ss *ScheduleService) SmartPlanningMulti(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, error) {
|
||||
displayResult, _, err := ss.SmartPlanningMultiRaw(ctx, userID, taskClassIDs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return displayResult, nil
|
||||
}
|
||||
|
||||
// SmartPlanningMultiRaw 执行“多任务类智能粗排”,同时返回展示结构和已分配任务项。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责多任务类请求的完整前置处理(归一化/校验/排序/时间窗收敛);
|
||||
// 2. 负责调用多任务类粗排主逻辑(共享资源池);
|
||||
// 3. 只计算建议,不负责落库。
|
||||
func (ss *ScheduleService) SmartPlanningMultiRaw(ctx context.Context, userID int, taskClassIDs []int) ([]model.UserWeekSchedule, []model.TaskClassItem, error) {
|
||||
// 1. 输入归一化。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 批量读取完整任务类(含 Items)。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 校验任务类并计算全局时间窗。
|
||||
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 拉取全局时间窗内的既有日程底板。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx,
|
||||
userID,
|
||||
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 5. 执行多任务类粗排(共享资源池 + 增量占位)。
|
||||
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 6. 转换前端展示结构。
|
||||
displayResult := conv.PlanningResultToUserWeekSchedules(schedules, allocatedItems)
|
||||
return displayResult, allocatedItems, nil
|
||||
}
|
||||
|
||||
// ResolvePlanningWindowByTaskClasses 解析“多任务类排程窗口”的相对周/天边界。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责根据 task_class_ids 计算全局起止日期并转换成相对周/天;
|
||||
// 2. 不执行粗排、不查询课表、不生成 HybridEntries;
|
||||
// 3. 供 Agent 周级 Move 工具做硬边界校验,防止越界移动。
|
||||
//
|
||||
// 返回语义:
|
||||
// 1. startWeek/startDay:允许排程的起点(含);
|
||||
// 2. endWeek/endDay:允许排程的终点(含);
|
||||
// 3. error:任何校验或日期转换失败都返回错误。
|
||||
func (ss *ScheduleService) ResolvePlanningWindowByTaskClasses(ctx context.Context, userID int, taskClassIDs []int) (int, int, int, int, error) {
|
||||
// 1. 输入归一化:过滤非法值并去重。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return 0, 0, 0, 0, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 批量查询任务类并复用统一校验逻辑,拿到全局起止日期。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
_, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
|
||||
// 3. 把绝对日期转换为“相对周/天”。
|
||||
// 3.1 这里统一复用 conv.RealDateToRelativeDate,确保和现有排程口径一致;
|
||||
// 3.2 若日期超出学期配置范围,直接返回错误,避免错误边界进入工具层。
|
||||
startWeek, startDay, err := conv.RealDateToRelativeDate(globalStartDate.Format(conv.DateFormat))
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
endWeek, endDay, err := conv.RealDateToRelativeDate(globalEndDate.Format(conv.DateFormat))
|
||||
if err != nil {
|
||||
return 0, 0, 0, 0, err
|
||||
}
|
||||
if endWeek < startWeek || (endWeek == startWeek && endDay < startDay) {
|
||||
return 0, 0, 0, 0, respond.InvalidDateRange
|
||||
}
|
||||
return startWeek, startDay, endWeek, endDay, nil
|
||||
}
|
||||
|
||||
// normalizeTaskClassIDsForMultiPlanning sanitizes task_class_ids: it drops
// non-positive values, removes duplicates, and preserves first-seen order.
func normalizeTaskClassIDsForMultiPlanning(ids []int) []int {
	if len(ids) == 0 {
		return []int{}
	}
	result := make([]int, 0, len(ids))
	seen := make(map[int]struct{}, len(ids))
	for _, candidate := range ids {
		// Skip invalid IDs and anything we've already kept.
		if candidate <= 0 {
			continue
		}
		if _, dup := seen[candidate]; dup {
			continue
		}
		seen[candidate] = struct{}{}
		result = append(result, candidate)
	}
	return result
}
|
||||
|
||||
// prepareTaskClassesForMultiPlanning turns the DAO result into a dataset that
// can be fed directly into the rough planner.
//
// Responsibilities:
//  1. validate that every requested task class may participate in auto
//     scheduling (mode "auto", non-nil ordered date range);
//  2. compute the global planning window (earliest start ~ latest end);
//  3. apply the multi-class ordering strategy (see
//     sortTaskClassesForMultiPlanning).
func prepareTaskClassesForMultiPlanning(taskClasses []model.TaskClass, orderedIDs []int) ([]*model.TaskClass, time.Time, time.Time, error) {
	if len(orderedIDs) == 0 {
		return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
	}

	// Index the classes by ID for O(1) lookup in request order. Taking the
	// address of the slice element (not the range variable) keeps pointers
	// valid into the backing array.
	classByID := make(map[int]*model.TaskClass, len(taskClasses))
	for i := range taskClasses {
		tc := &taskClasses[i]
		classByID[tc.ID] = tc
	}

	ordered := make([]*model.TaskClass, 0, len(orderedIDs))
	var globalStart time.Time
	var globalEnd time.Time
	for idx, id := range orderedIDs {
		taskClass, exists := classByID[id]
		if !exists || taskClass == nil {
			// A requested ID with no matching (owned) class is invalid input.
			return nil, time.Time{}, time.Time{}, respond.WrongTaskClassID
		}
		if taskClass.Mode == nil || *taskClass.Mode != "auto" {
			return nil, time.Time{}, time.Time{}, respond.TaskClassModeNotAuto
		}
		if taskClass.StartDate == nil || taskClass.EndDate == nil {
			return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
		}
		start := *taskClass.StartDate
		end := *taskClass.EndDate
		if end.Before(start) {
			return nil, time.Time{}, time.Time{}, respond.InvalidDateRange
		}
		// Widen the global window: earliest start wins, latest end wins. The
		// idx == 0 guard seeds the window from the first class.
		if idx == 0 || start.Before(globalStart) {
			globalStart = start
		}
		if idx == 0 || end.After(globalEnd) {
			globalEnd = end
		}
		ordered = append(ordered, taskClass)
	}

	// Apply the planning order (end date, strategy, input order as tiebreak).
	sortTaskClassesForMultiPlanning(ordered, orderedIDs)
	return ordered, globalStart, globalEnd, nil
}
|
||||
|
||||
// sortTaskClassesForMultiPlanning performs a stable sort:
//  1. earlier end_date first;
//  2. "rapid" strategy before others;
//  3. the caller-supplied input order, then ID, as tiebreaks.
func sortTaskClassesForMultiPlanning(taskClasses []*model.TaskClass, inputOrder []int) {
	if len(taskClasses) <= 1 {
		return
	}
	// Map each task-class ID to its position in the caller-supplied order.
	orderIndex := make(map[int]int, len(inputOrder))
	for idx, id := range inputOrder {
		orderIndex[id] = idx
	}

	sort.SliceStable(taskClasses, func(i, j int) bool {
		left := taskClasses[i]
		right := taskClasses[j]
		// Defensive: nil entries sink toward the end.
		if left == nil || right == nil {
			return left != nil
		}
		// Rule 1: strictly earlier end date wins.
		if left.EndDate != nil && right.EndDate != nil && !left.EndDate.Equal(*right.EndDate) {
			return left.EndDate.Before(*right.EndDate)
		}
		// Rule 2: a "rapid" strategy beats a non-rapid one.
		leftRapid := left.Strategy != nil && *left.Strategy == "rapid"
		rightRapid := right.Strategy != nil && *right.Strategy == "rapid"
		if leftRapid != rightRapid {
			return leftRapid
		}
		// Rule 3: fall back to the input order, then to the ID.
		leftOrder, leftOK := orderIndex[left.ID]
		rightOrder, rightOK := orderIndex[right.ID]
		if leftOK && rightOK && leftOrder != rightOrder {
			return leftOrder < rightOrder
		}
		return left.ID < right.ID
	})
}
|
||||
|
||||
// HybridScheduleWithPlan 构建“单任务类”混合日程(existing + suggested)。
|
||||
func (ss *ScheduleService) HybridScheduleWithPlan(
|
||||
ctx context.Context, userID, taskClassID int,
|
||||
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||
// 1. 校验并读取任务类。
|
||||
taskClass, err := ss.taskClassDAO.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if taskClass == nil {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
if taskClass.Mode == nil || *taskClass.Mode != "auto" {
|
||||
return nil, nil, respond.TaskClassModeNotAuto
|
||||
}
|
||||
if taskClass.StartDate == nil || taskClass.EndDate == nil {
|
||||
return nil, nil, respond.InvalidDateRange
|
||||
}
|
||||
|
||||
// 2. 拉取时间窗内既有日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx, userID,
|
||||
conv.CalculateFirstDayOfWeek(*taskClass.StartDate),
|
||||
conv.CalculateLastDayOfWeek(*taskClass.EndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 执行粗排。
|
||||
allocatedItems, err := logic.SmartPlanningRawItems(schedules, taskClass)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 统一合并。
|
||||
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||
return entries, allocatedItems, nil
|
||||
}
|
||||
|
||||
// HybridScheduleWithPlanMulti 构建“多任务类”混合日程(existing + suggested)。
|
||||
func (ss *ScheduleService) HybridScheduleWithPlanMulti(
|
||||
ctx context.Context,
|
||||
userID int,
|
||||
taskClassIDs []int,
|
||||
) ([]model.HybridScheduleEntry, []model.TaskClassItem, error) {
|
||||
// 1. 归一化任务类 ID。
|
||||
normalizedIDs := normalizeTaskClassIDsForMultiPlanning(taskClassIDs)
|
||||
if len(normalizedIDs) == 0 {
|
||||
return nil, nil, respond.WrongTaskClassID
|
||||
}
|
||||
|
||||
// 2. 拉取任务类并做校验/排序。
|
||||
taskClasses, err := ss.taskClassDAO.GetCompleteTaskClassesByIDs(ctx, userID, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
orderedTaskClasses, globalStartDate, globalEndDate, err := prepareTaskClassesForMultiPlanning(taskClasses, normalizedIDs)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 3. 拉取全局时间窗内既有日程。
|
||||
schedules, err := ss.scheduleDAO.GetUserSchedulesByTimeRange(
|
||||
ctx,
|
||||
userID,
|
||||
conv.CalculateFirstDayOfWeek(globalStartDate),
|
||||
conv.CalculateLastDayOfWeek(globalEndDate),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 4. 多任务类粗排。
|
||||
allocatedItems, err := logic.SmartPlanningRawItemsMulti(schedules, orderedTaskClasses)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// 5. 统一合并。
|
||||
entries := buildHybridEntriesFromSchedulesAndAllocated(schedules, allocatedItems)
|
||||
return entries, allocatedItems, nil
|
||||
}
|
||||
|
||||
// buildHybridEntriesFromSchedulesAndAllocated merges existing and suggested
// entries into one flat list.
//
// Notes:
//  1. existing rows are grouped by (event, day, embeddable flag, blocking
//     flag), then each group is split into runs of consecutive sections;
//  2. suggested entries are generated directly from allocatedItems;
//  3. pure in-memory assembly — no database access.
func buildHybridEntriesFromSchedulesAndAllocated(
	schedules []model.Schedule,
	allocatedItems []model.TaskClassItem,
) []model.HybridScheduleEntry {
	entries := make([]model.HybridScheduleEntry, 0, len(schedules)/2+len(allocatedItems))

	// Grouping key for existing atomic slots: the same event on the same day
	// with the same embed/block semantics collapses into one group.
	type eventGroupKey struct {
		EventID           int
		Week              int
		DayOfWeek         int
		CanBeEmbedded     bool
		BlockForSuggested bool
	}
	type eventGroup struct {
		Key      eventGroupKey
		Name     string
		Type     string
		Sections []int
	}
	groupMap := make(map[eventGroupKey]*eventGroup)

	// 1. Process the existing schedule rows first.
	for _, s := range schedules {
		// Defaults used when the joined Event row is missing.
		name := "未知"
		typ := "course"
		canBeEmbedded := false
		if s.Event != nil {
			name = s.Event.Name
			typ = s.Event.Type
			canBeEmbedded = s.Event.CanBeEmbedded
		}

		// 1.1 Blocking semantics:
		// 1.1.1 task slots always block;
		// 1.1.2 non-embeddable course slots block;
		// 1.1.3 embeddable course slots do not block unless the atomic slot is
		//       already occupied by an embedded task.
		blockForSuggested := true
		if typ == "course" && canBeEmbedded && s.EmbeddedTaskID == nil {
			blockForSuggested = false
		}

		key := eventGroupKey{
			EventID:           s.EventID,
			Week:              s.Week,
			DayOfWeek:         s.DayOfWeek,
			CanBeEmbedded:     canBeEmbedded,
			BlockForSuggested: blockForSuggested,
		}
		group, ok := groupMap[key]
		if !ok {
			group = &eventGroup{
				Key:  key,
				Name: name,
				Type: typ,
			}
			groupMap[key] = group
		}
		group.Sections = append(group.Sections, s.Section)
	}

	// Emit one entry per run of consecutive sections within each group.
	for _, group := range groupMap {
		if len(group.Sections) == 0 {
			continue
		}
		sort.Ints(group.Sections)

		runStart := group.Sections[0]
		prev := group.Sections[0]
		// flushRun appends the [from, to] section run as one "existing" entry.
		flushRun := func(from, to int) {
			entries = append(entries, model.HybridScheduleEntry{
				Week:              group.Key.Week,
				DayOfWeek:         group.Key.DayOfWeek,
				SectionFrom:       from,
				SectionTo:         to,
				Name:              group.Name,
				Type:              group.Type,
				Status:            "existing",
				EventID:           group.Key.EventID,
				CanBeEmbedded:     group.Key.CanBeEmbedded,
				BlockForSuggested: group.Key.BlockForSuggested,
			})
		}
		for i := 1; i < len(group.Sections); i++ {
			cur := group.Sections[i]
			if cur == prev+1 {
				prev = cur
				continue
			}
			// Gap found: flush the finished run and start a new one.
			flushRun(runStart, prev)
			runStart = cur
			prev = cur
		}
		flushRun(runStart, prev)
	}

	// 2. Then append the suggested entries from the allocated items.
	for _, item := range allocatedItems {
		// Items the planner could not place carry no EmbeddedTime.
		if item.EmbeddedTime == nil {
			continue
		}
		name := "未命名任务"
		if item.Content != nil && strings.TrimSpace(*item.Content) != "" {
			name = strings.TrimSpace(*item.Content)
		}
		entries = append(entries, model.HybridScheduleEntry{
			Week:              item.EmbeddedTime.Week,
			DayOfWeek:         item.EmbeddedTime.DayOfWeek,
			SectionFrom:       item.EmbeddedTime.SectionFrom,
			SectionTo:         item.EmbeddedTime.SectionTo,
			Name:              name,
			Type:              "task",
			Status:            "suggested",
			TaskItemID:        item.ID,
			TaskClassID:       derefInt(item.CategoryID),
			BlockForSuggested: true,
		})
	}

	return entries
}
|
||||
|
||||
// derefInt dereferences p, treating nil as zero.
func derefInt(p *int) int {
	var v int
	if p != nil {
		v = *p
	}
	return v
}
|
||||
@@ -1,552 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// TaskClassService implements task-class use cases on top of the DAOs.
type TaskClassService struct {
	// DAO dependencies, injected via NewTaskClassService.
	taskClassRepo *dao.TaskClassDAO
	cacheRepo     *dao.CacheDAO
	scheduleRepo  *dao.ScheduleDAO
	repoManager   *dao.RepoManager // coordinates transactions spanning multiple DAOs
}
|
||||
|
||||
func NewTaskClassService(taskClassRepo *dao.TaskClassDAO, cacheRepo *dao.CacheDAO, scheduleRepo *dao.ScheduleDAO, manager *dao.RepoManager) *TaskClassService {
|
||||
return &TaskClassService{
|
||||
taskClassRepo: taskClassRepo,
|
||||
cacheRepo: cacheRepo,
|
||||
scheduleRepo: scheduleRepo,
|
||||
repoManager: manager,
|
||||
}
|
||||
}
|
||||
|
||||
// AddOrUpdateTaskClass creates (method == 0) or updates (method == 1) a task
// class and its items for the given user. Validation happens up front; the
// writes run inside one transaction so class and items stay consistent.
//
// NOTE(review): ctx is accepted but not propagated into the DAO transaction —
// confirm whether cancellation should flow through.
func (sv *TaskClassService) AddOrUpdateTaskClass(ctx context.Context, req *model.UserAddTaskClassRequest, userID int, method int, targetTaskClassID int) error {
	//1. Validate the parameters first.
	if req.Mode == "auto" {
		// Auto-scheduling requires a complete, well-formed, ordered date range.
		if req.StartDate == "" || req.EndDate == "" {
			return respond.MissingParamForAutoScheduling
		}
		st, err := time.Parse("2006-01-02", req.StartDate)
		if err != nil {
			return respond.WrongParamType
		}
		ed, err := time.Parse("2006-01-02", req.EndDate)
		if err != nil {
			return respond.WrongParamType
		}
		if st.After(ed) {
			return respond.InvalidDateRange
		}
	}
	if req.Mode == "" || req.Name == "" || len(req.Items) == 0 {
		return respond.MissingParam
	}
	// 1. excluded_slots are "half-day block" indices, each mapping to 2
	//    sections (1 -> 1-2, ..., 6 -> 11-12);
	// 2. allowing 7~12 would produce out-of-range sections when the planning
	//    grid is expanded, triggering a runtime panic;
	// 3. so dirty data is rejected at this write entry point before it can
	//    pollute the downstream planning pipeline.
	for _, slot := range req.Config.ExcludedSlots {
		if slot < 1 || slot > 6 {
			return respond.WrongParamType
		}
	}
	// 1. excluded_days_of_week is a hard "whole day unavailable" constraint
	//    that the planner masks out entirely;
	// 2. only 1~7 (Monday..Sunday) are allowed;
	// 3. invalid values would desynchronize the planner's filtering from the
	//    frontend's display, so they are rejected here as well.
	for _, dayOfWeek := range req.Config.ExcludedDaysOfWeek {
		if dayOfWeek < 1 || dayOfWeek > 7 {
			return respond.WrongParamType
		}
	}
	//2. Persist inside a transaction.
	if err := sv.taskClassRepo.Transaction(func(txDAO *dao.TaskClassDAO) error {
		taskClass, items, err := conv.ProcessUserAddTaskClassRequest(req, userID)
		if err != nil {
			return err
		}
		if method == 1 { // update: reuse the caller-specified ID
			taskClass.ID = targetTaskClassID
		}

		taskClassID, err := txDAO.AddOrUpdateTaskClass(userID, taskClass)
		if err != nil {
			return err
		}

		// Attach every item to the (possibly newly created) class ID.
		for i := range items {
			items[i].CategoryID = &taskClassID
		}
		if err := txDAO.AddOrUpdateTaskClassItems(userID, items); err != nil {
			return err
		}
		return nil
	}); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func (sv *TaskClassService) GetUserTaskClassInfos(ctx context.Context, userID int) (*model.UserGetTaskClassesResponse, error) {
|
||||
//1.先查询redis
|
||||
list, err := sv.cacheRepo.GetTaskClassList(ctx, userID)
|
||||
if err == nil {
|
||||
//命中缓存
|
||||
return list, nil
|
||||
} else if !errors.Is(err, redis.Nil) { //不是缓存未命中错误,说明redis可能炸了,照常放行
|
||||
log.Println("redis获取任务分类列表失败:", err)
|
||||
}
|
||||
//2.缓存未命中,查询数据库
|
||||
taskClasses, err := sv.taskClassRepo.GetUserTaskClasses(userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp := conv.TaskClassModelToResponse(taskClasses)
|
||||
//3.写入缓存
|
||||
err = sv.cacheRepo.AddTaskClassList(ctx, userID, resp)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) GetUserCompleteTaskClass(ctx context.Context, userID int, taskClassID int) (*model.UserAddTaskClassRequest, error) {
|
||||
//1.查询数据库
|
||||
taskClass, err := sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//2.转换为响应结构体
|
||||
resp, err := conv.ProcessUserGetCompleteTaskClassRequest(taskClass)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// AddTaskClassItemIntoSchedule places a single task-class item onto the
// user's schedule, either by embedding it into an existing course event or
// by creating a brand-new schedule event.
//
// Flow:
//  1. verify ownership of the task item (item -> class -> owner);
//  2. reject items that are already arranged;
//  3. embed path (req.EmbedCourseEventID != 0): validate the course's owner,
//     exact time match, and that no other task block occupies it, then write
//     the embedding and the item's embedded_time;
//  4. normal path: build schedule rows, check slot conflicts in one query,
//     then insert event + slots + embedded_time inside one transaction.
func (sv *TaskClassService) AddTaskClassItemIntoSchedule(ctx context.Context, req *model.UserInsertTaskClassItemToScheduleRequest, userID int, taskID int) error {
	// 1. Ownership check: resolve the owning task class of this item...
	taskClassID, err := sv.taskClassRepo.GetTaskClassIDByTaskItemID(ctx, taskID) // task item ID -> owning task class ID
	if err != nil {
		return err
	}
	// ...then the owner of that class.
	ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) // task class ID -> owning user ID
	if err != nil {
		return err
	}
	if ownerID != userID {
		return respond.TaskClassItemNotBelongToUser
	}
	// 2. Reject if this item has already been placed somewhere.
	result, err := sv.taskClassRepo.IfTaskClassItemArranged(ctx, taskID)
	if err != nil {
		return err
	}
	if result {
		return respond.TaskClassItemAlreadyArranged
	}
	// 3. Load the item itself.
	taskItem, err := sv.taskClassRepo.GetTaskClassItemByID(ctx, taskID) // task item ID -> item record
	if err != nil {
		return err
	}
	// Record the target slot on the item's embedded_time field (written to
	// the DB further down on whichever path succeeds).
	taskItem.EmbeddedTime = &model.TargetTime{
		DayOfWeek:   req.DayOfWeek,
		Week:        req.Week,
		SectionFrom: req.StartSection,
		SectionTo:   req.EndSection,
	}
	// 3. Embed-into-course path.
	if req.EmbedCourseEventID != 0 {
		// Verify the course exists and belongs to this user.
		courseOwnerID, err := sv.scheduleRepo.GetCourseUserIDByID(ctx, req.EmbedCourseEventID)
		if err != nil {
			return err
		}
		if courseOwnerID != userID {
			return respond.CourseNotBelongToUser
		}
		// The requested interval must match the course's time exactly
		// (current policy: full-interval match only).
		match, err := sv.scheduleRepo.IsCourseTimeMatch(ctx, req.EmbedCourseEventID, req.Week, req.DayOfWeek, req.StartSection, req.EndSection)
		if err != nil {
			return err
		}
		if !match {
			return respond.CourseTimeNotMatch
		}
		// Current business rule: one course can host at most one task block
		// (the schema would allow several, capacity permitting).
		isEmbedded, err := sv.scheduleRepo.IsCourseEmbeddedByOtherTaskBlock(ctx, req.EmbedCourseEventID, req.StartSection, req.EndSection)
		if err != nil {
			return err
		}
		if isEmbedded {
			return respond.CourseAlreadyEmbeddedByOtherTaskBlock
		}
		// Write the embedding: sets embedded_task_id on the matching slots.
		err = sv.scheduleRepo.EmbedTaskIntoSchedule(req.StartSection, req.EndSection, req.DayOfWeek, req.Week, userID, taskID)
		if err != nil {
			return err
		}
		// Persist the item's embedded_time.
		// NOTE(review): these two writes are not wrapped in a transaction,
		// unlike the normal path below — confirm whether that is intended.
		err = sv.taskClassRepo.UpdateTaskClassItemEmbeddedTime(ctx, taskID, taskItem.EmbeddedTime)
		if err != nil {
			return err
		}
		return nil
	}
	// 4. Normal path: build the Schedule models.
	sections := make([]int, 0, req.EndSection-req.StartSection+1)
	schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(req, taskItem, nil, userID, req.StartSection, req.EndSection)
	if err != nil {
		return err
	}
	// Expand the section interval into a slice for the conflict query.
	for section := req.StartSection; section <= req.EndSection; section++ {
		sections = append(sections, section)
	}
	// 4.1 One bulk conflict check instead of one query per slot.
	conflict, err := sv.scheduleRepo.HasUserScheduleConflict(ctx, userID, req.Week, req.DayOfWeek, sections)
	if err != nil {
		return err
	}
	if conflict {
		return respond.ScheduleConflict
	}
	// 5. Persist everything through one RepoManager-managed transaction.
	if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
		// 5.0 Convert the relative slot (week/day/section) to absolute
		// timestamps so later range queries and conflict checks work on
		// real time.
		st, ed, err := conv.RelativeTimeToRealTime(req.Week, req.DayOfWeek, req.StartSection, req.EndSection)
		if err != nil {
			return err
		}
		scheduleEvent.StartTime = st
		scheduleEvent.EndTime = ed
		// 5.1 Insert the event via the transactional Schedule repo.
		eventID, err := txM.Schedule.AddScheduleEvent(scheduleEvent)
		if err != nil {
			return err // rolls back
		}
		// 5.2 Link the generated event ID (in-memory only).
		for i := range schedules {
			schedules[i].EventID = eventID
		}
		// 5.3 Bulk-insert the atomic slots; an FK/unique failure here also
		// rolls back the event inserted in 5.1.
		if _, err = txM.Schedule.AddSchedules(schedules); err != nil {
			return err // rolls back
		}
		// 5.4 Persist the item's embedded_time inside the same transaction.
		if err := txM.TaskClass.UpdateTaskClassItemEmbeddedTime(ctx, taskID, taskItem.EmbeddedTime); err != nil {
			return err // rolls back
		}
		return nil
	}); err != nil {
		return err
	}
	return nil
}
|
||||
|
||||
func (sv *TaskClassService) DeleteTaskClassItem(ctx context.Context, userID int, taskItemID int) error {
|
||||
//1.先验证任务块归属
|
||||
taskClassID, err := sv.taskClassRepo.GetTaskClassIDByTaskItemID(ctx, taskItemID) //通过任务块ID获取所属任务类ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) //通过任务类ID获取所属用户ID
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if ownerID != userID {
|
||||
return respond.TaskClassItemNotBelongToUser
|
||||
}
|
||||
//2.如果该任务块已经被安排了,先解除安排,再删除任务块(事务)
|
||||
if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//2.1.先检查该任务块是否已经被安排了
|
||||
arranged, err := txM.TaskClass.IfTaskClassItemArranged(ctx, taskItemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if arranged {
|
||||
//2.2.如果已经被安排了,先解除安排
|
||||
//先扫schedules找到该task_item_id并删除
|
||||
_, txErr := txM.Schedule.FindEmbeddedTaskIDAndDeleteIt(ctx, taskItemID)
|
||||
//2.3.再将task_items表的embedded_time字段设置为null
|
||||
txErr = txM.TaskClass.DeleteTaskClassItemEmbeddedTime(ctx, taskItemID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
//再删除schedule_event表中对应的事件
|
||||
txErr = txM.Schedule.DeleteScheduleEventByTaskItemID(ctx, taskItemID)
|
||||
if txErr != nil {
|
||||
return txErr
|
||||
}
|
||||
}
|
||||
//2.4.最后删除任务块
|
||||
err = txM.TaskClass.DeleteTaskClassItemByID(ctx, taskItemID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) DeleteTaskClass(ctx context.Context, userID int, taskClassID int) error {
|
||||
//1.先验证任务类归属
|
||||
ownerID, err := sv.taskClassRepo.GetTaskClassUserIDByID(ctx, taskClassID) //通过任务类ID获取所属用户ID
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
return err
|
||||
}
|
||||
if ownerID != userID {
|
||||
return respond.TaskClassNotBelongToUser
|
||||
}
|
||||
//2.删除任务类(事务)
|
||||
err = sv.taskClassRepo.DeleteTaskClassByID(ctx, taskClassID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetCompleteTaskClassByID 获取任务类完整详情(含关联的 TaskClassItem 列表)。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1) 直接委托 DAO 层查询,不做额外业务逻辑;
|
||||
// 2) 主要供 Agent 排程链路使用,获取 Items 用于 materialize 节点映射。
|
||||
func (sv *TaskClassService) GetCompleteTaskClassByID(ctx context.Context, taskClassID, userID int) (*model.TaskClass, error) {
|
||||
return sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
}
|
||||
|
||||
func (sv *TaskClassService) BatchApplyPlans(ctx context.Context, taskClassID int, userID int, plans *model.UserInsertTaskClassItemToScheduleRequestBatch) error {
|
||||
//1.通过任务类id获取任务类详情
|
||||
taskClass, err := sv.taskClassRepo.GetCompleteTaskClassByID(ctx, taskClassID, userID)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
return err
|
||||
}
|
||||
//2.校验任务类的参数是否合法
|
||||
if taskClass == nil {
|
||||
return respond.WrongTaskClassID
|
||||
}
|
||||
if *taskClass.Mode != "auto" {
|
||||
return respond.TaskClassModeNotAuto
|
||||
}
|
||||
//3.获取任务类安排的时间范围内的全部周数信息(左右边界不足一周的情况也要算作一周),用于下方冲突检查
|
||||
startWeekTime := conv.CalculateFirstDayOfWeek(*taskClass.StartDate)
|
||||
endWeekTime := conv.CalculateLastDayOfWeek(*taskClass.EndDate)
|
||||
schedules, err := sv.scheduleRepo.GetUserSchedulesByTimeRange(ctx, userID, startWeekTime, endWeekTime)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
startWeek, _, err := conv.RealDateToRelativeDate(startWeekTime.Format("2006-01-02"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endWeek, _, err := conv.RealDateToRelativeDate(endWeekTime.Format("2006-01-02"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//4.统一检查冲突(避免逐条查库)
|
||||
//先将日程放入一个map中,key是"周-星期-节次",value是课程信息,方便后续检查冲突
|
||||
courseMap := make(map[string]model.Schedule)
|
||||
for _, schedule := range schedules {
|
||||
key := fmt.Sprintf("%d-%d-%d", schedule.Week, schedule.DayOfWeek, schedule.Section)
|
||||
courseMap[key] = schedule
|
||||
}
|
||||
//再遍历每个任务块的安排时间,检查是否和课程冲突(目前逻辑是只要有一个时段冲突就算冲突,后续可以优化为统计冲突的时段数量,或者提供具体的冲突时段信息)
|
||||
for _, plan := range plans.Items {
|
||||
if plan.Week < startWeek || plan.Week > endWeek {
|
||||
return respond.TaskClassItemTryingToInsertOutOfTimeRange
|
||||
}
|
||||
for section := plan.StartSection; section <= plan.EndSection; section++ {
|
||||
key := fmt.Sprintf("%d-%d-%d", plan.Week, plan.DayOfWeek, section)
|
||||
// 如果课程存在,并且满足以下任一条件则认为冲突:
|
||||
// 1. 课程时段已经被其他任务块嵌入了(不允许多个任务块嵌入同一课程)
|
||||
// 2. 当前时段的课的EventID与用户计划中指定的EmbedCourseEventID不匹配(说明用户计划要嵌入的课程和当前时段的课不是同一节)
|
||||
// 3. 用户计划中没有指定EmbedCourseEventID(即EmbedCourseEventID为0),但当前时段有课(不允许在有课的时段安排任务块)
|
||||
// 4. 当前时段的课不允许被嵌入(即使用户计划中指定了EmbedCourseEventID,但如果课程本身不允许被嵌入了,也算冲突)
|
||||
if course, exists := courseMap[key]; exists && ((plan.EmbedCourseEventID != 0 && course.EmbeddedTask != nil) ||
|
||||
(plan.EmbedCourseEventID != course.EventID) || plan.EmbedCourseEventID == 0 || !course.Event.CanBeEmbedded) {
|
||||
return respond.ScheduleConflict
|
||||
}
|
||||
}
|
||||
}
|
||||
//5.分流批量写入数据库(通过 RepoManager 统一管理事务)
|
||||
//先分流
|
||||
toEmbed := make([]model.SingleTaskClassItem, 0) //需要嵌入课程的任务块
|
||||
toNormal := make([]model.SingleTaskClassItem, 0) //需要新建日程的任务块
|
||||
for _, item := range plans.Items {
|
||||
if item.EmbedCourseEventID != 0 {
|
||||
toEmbed = append(toEmbed, item)
|
||||
} else {
|
||||
toNormal = append(toNormal, item)
|
||||
}
|
||||
}
|
||||
//再开事务批量写库
|
||||
if err := sv.repoManager.Transaction(ctx, func(txM *dao.RepoManager) error {
|
||||
//5.1 先处理需要嵌入课程的任务块
|
||||
//先提取出需要嵌入的课程ID和TaskItemID列表
|
||||
courseIDs := make([]int, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
courseIDs = append(courseIDs, item.EmbedCourseEventID)
|
||||
}
|
||||
itemIDs := make([]int, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
itemIDs = append(itemIDs, item.TaskItemID)
|
||||
}
|
||||
//检查任务块本身是否已经被安排
|
||||
result, err := sv.taskClassRepo.BatchCheckIfTaskClassItemsArranged(ctx, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if result {
|
||||
return respond.TaskClassItemAlreadyArranged
|
||||
}
|
||||
//验证一下plans中的taskItemID确实都属于这个用户和这个任务类(避免用户恶意构造请求把别的用户的任务块或者不属于任何任务类的任务块也安排了)
|
||||
//同时也能检查是否重复
|
||||
result, err = sv.taskClassRepo.ValidateTaskItemIDsBelongToTaskClass(ctx, taskClassID, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !result {
|
||||
return respond.TaskClassItemNotBelongToTaskClass
|
||||
}
|
||||
//批量更新日程表中对应课程的embedded_task_id字段(目前业务限制:一个课程只能被一个任务块嵌入了,所以直接批量更新,不用担心覆盖问题)
|
||||
err = txM.Schedule.BatchEmbedTaskIntoSchedule(ctx, courseIDs, itemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//批量更新任务块的embedded_time字段
|
||||
targetTimes := make([]*model.TargetTime, 0, len(toEmbed))
|
||||
for _, item := range toEmbed {
|
||||
targetTimes = append(targetTimes, &model.TargetTime{
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
Week: item.Week,
|
||||
SectionFrom: item.StartSection,
|
||||
SectionTo: item.EndSection,
|
||||
})
|
||||
}
|
||||
err = txM.TaskClass.BatchUpdateTaskClassItemEmbeddedTime(ctx, itemIDs, targetTimes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
//5.2 再处理需要新建日程的任务块
|
||||
//先提取出需要新建日程的任务块ID列表
|
||||
normalItemIDs := make([]int, 0, len(toNormal))
|
||||
for _, item := range toNormal {
|
||||
normalItemIDs = append(normalItemIDs, item.TaskItemID)
|
||||
}
|
||||
//验证一下plans中的taskItemID确实都属于这个任务类(避免用户恶意构造请求把别的用户的任务块或者不属于任何任务类的任务块也安排了)
|
||||
result, err = sv.taskClassRepo.ValidateTaskItemIDsBelongToTaskClass(ctx, taskClassID, normalItemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !result {
|
||||
return respond.TaskClassItemNotBelongToTaskClass
|
||||
}
|
||||
//批量提取TaskItems
|
||||
taskItems, err := txM.TaskClass.GetTaskClassItemsByIDs(ctx, normalItemIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(taskItems) != len(normalItemIDs) {
|
||||
log.Printf("警告:批量提取任务块时,返回的任务块数量与请求中的任务块ID数量不匹配,可能存在数据问题。请求ID数量:%d,返回任务块数量:%d", len(normalItemIDs), len(taskItems))
|
||||
return respond.InternalError(errors.New("返回的任务块数量与请求中的任务块ID数量不匹配,可能存在数据问题"))
|
||||
}
|
||||
//将toNormal按照TaskItemID升序排序,将taskItems也按照ID升序排序,保证一一对应关系(上面已经检查过重复)
|
||||
//如果请求中的任务块ID有重复,这里就无法保证一一对应关系了,后续可以考虑在请求层面加一个校验,拒绝包含重复任务块ID的请求
|
||||
sort.SliceStable(toNormal, func(i, j int) bool {
|
||||
return toNormal[i].TaskItemID < toNormal[j].TaskItemID
|
||||
})
|
||||
sort.SliceStable(taskItems, func(i, j int) bool {
|
||||
return taskItems[i].ID < taskItems[j].ID
|
||||
})
|
||||
//开始构建event和schedules
|
||||
finalSchedules := make([]model.Schedule, 0) //最终要插入数据库的Schedule切片
|
||||
finalScheduleEvents := make([]model.ScheduleEvent, 0) //最终要插入数据库的ScheduleEvent切片
|
||||
pos := make([]int, 0) //记录每个任务块对应的Schedule在finalSchedules中的位置,方便后续批量插入数据库后回填EventID
|
||||
for i := 0; i < len(toNormal); i++ {
|
||||
item := toNormal[i]
|
||||
taskItem := taskItems[i]
|
||||
if item.StartSection < 1 || item.EndSection > 12 || item.StartSection > item.EndSection {
|
||||
return respond.InvalidSectionRange
|
||||
}
|
||||
schedules, scheduleEvent, err := conv.UserInsertTaskItemRequestToModel(&model.UserInsertTaskClassItemToScheduleRequest{
|
||||
Week: item.Week,
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
StartSection: item.StartSection,
|
||||
EndSection: item.EndSection,
|
||||
EmbedCourseEventID: 0, //不嵌入课程
|
||||
}, &taskItem, nil, userID, item.StartSection, item.EndSection)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
finalScheduleEvents = append(finalScheduleEvents, *scheduleEvent)
|
||||
for range schedules {
|
||||
pos = append(pos, len(finalScheduleEvents)-1)
|
||||
}
|
||||
finalSchedules = append(finalSchedules, schedules...)
|
||||
}
|
||||
//最后批量插入数据库
|
||||
//先插入ScheduleEvent表,获取生成的EventID,再批量插入Schedule表,最后批量更新TaskClassItem的embedded_time字段
|
||||
ids, err := txM.Schedule.InsertScheduleEvents(ctx, finalScheduleEvents)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// 将生成的 ScheduleEvent ID 赋值给对应的 Schedule 的 EventID 字段
|
||||
for i := range finalSchedules {
|
||||
finalSchedules[i].EventID = ids[pos[i]]
|
||||
}
|
||||
if _, err = txM.Schedule.AddSchedules(finalSchedules); err != nil {
|
||||
return err
|
||||
}
|
||||
//批量更新任务块的embedded_time字段
|
||||
targetTimes = make([]*model.TargetTime, 0, len(toEmbed))
|
||||
for _, item := range toNormal {
|
||||
targetTimes = append(targetTimes, &model.TargetTime{
|
||||
DayOfWeek: item.DayOfWeek,
|
||||
Week: item.Week,
|
||||
SectionFrom: item.StartSection,
|
||||
SectionTo: item.EndSection,
|
||||
})
|
||||
}
|
||||
//提取出所有需要更新的任务块ID
|
||||
itemIDs = make([]int, 0, len(toNormal))
|
||||
for _, item := range toNormal {
|
||||
itemIDs = append(itemIDs, item.TaskItemID)
|
||||
}
|
||||
err = txM.TaskClass.BatchUpdateTaskClassItemEmbeddedTime(ctx, itemIDs, targetTimes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -1,537 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/dao"
|
||||
outboxinfra "github.com/LoveLosita/smartflow/backend/infra/outbox"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/respond"
|
||||
eventsvc "github.com/LoveLosita/smartflow/backend/service/events"
|
||||
"github.com/go-redis/redis/v8"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
const (
	// taskBatchStatusMaxIDs caps the number of task IDs accepted by one
	// batch-status query, so a single request cannot amplify cache/memory
	// scan costs.
	taskBatchStatusMaxIDs = 100
	// taskUrgencyPromoteDedupeTTL is the lifetime of the dedupe lock for one
	// "promote this task" request.
	//
	// Trade-off:
	// 1. too short: a slightly slow consumer causes duplicate deliveries;
	// 2. too long: recovery after a failed first delivery is delayed;
	// 3. 120 seconds is an interim compromise, to be tuned from production
	//    observations.
	taskUrgencyPromoteDedupeTTL = 120 * time.Second
	// taskUrgencyPromoteDedupeKeyFmt is the dedupe key template,
	// formatted with (userID, taskID).
	taskUrgencyPromoteDedupeKeyFmt = "smartflow:task:promote:pending:%d:%d"
)
|
||||
|
||||
// TaskService implements the task domain use cases on top of the task DAO,
// the Redis-backed cache, and the outbox event publisher.
type TaskService struct {
	// dao handles task table reads and writes.
	dao *dao.TaskDAO
	// cache provides the task-list cache and the Redis dedupe-lock primitives.
	cache *dao.CacheDAO
	// eventPublisher publishes outbox events; may be nil (e.g. when
	// Kafka/the event bus is disabled).
	eventPublisher outboxinfra.EventPublisher
	// activeScheduleDAO maintains active-schedule due jobs; nil keeps the
	// legacy task flow compatible.
	activeScheduleDAO *dao.ActiveScheduleDAO
}
|
||||
|
||||
// NewTaskService 创建 TaskService 实例。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只做依赖注入,不做连接可用性探测;
|
||||
// 2. 允许 eventPublisher 为空(用于本地降级场景)。
|
||||
func NewTaskService(taskDAO *dao.TaskDAO, cacheDAO *dao.CacheDAO, eventPublisher outboxinfra.EventPublisher) *TaskService {
|
||||
return &TaskService{
|
||||
dao: taskDAO,
|
||||
cache: cacheDAO,
|
||||
eventPublisher: eventPublisher,
|
||||
}
|
||||
}
|
||||
|
||||
// SetActiveScheduleDAO 注入主动调度自有表仓储。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只负责迁移期依赖接线,避免扩大 TaskService 构造函数调用面;
|
||||
// 2. 不改变任务主流程语义,未注入时主动调度 job 同步自动降级为 no-op。
|
||||
func (ts *TaskService) SetActiveScheduleDAO(activeScheduleDAO *dao.ActiveScheduleDAO) {
|
||||
if ts != nil {
|
||||
ts.activeScheduleDAO = activeScheduleDAO
|
||||
}
|
||||
}
|
||||
|
||||
// AddTask 新增任务。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责参数转换、优先级合法性校验与写库;
|
||||
// 2. 不负责"紧急性自动平移"逻辑(该逻辑发生在任务读取时的懒触发链路)。
|
||||
func (ts *TaskService) AddTask(ctx context.Context, req *model.UserAddTaskRequest, userID int) (*model.UserAddTaskResponse, error) {
|
||||
// 1. 把用户请求转换为内部模型,避免 API 层结构直接泄漏到 DAO。
|
||||
taskModel := conv.UserAddTaskRequestToModel(req, userID)
|
||||
// 2. 优先级范围校验:当前任务体系只允许 1~4。
|
||||
if taskModel.Priority < 1 || taskModel.Priority >= 5 {
|
||||
return nil, respond.InvalidPriority
|
||||
}
|
||||
// 3. 写库。
|
||||
createdTask, err := ts.dao.AddTask(taskModel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ts.syncActiveScheduleJobBestEffort(ctx, createdTask)
|
||||
// 4. 返回对外响应 DTO。
|
||||
response := conv.ModelToUserAddTaskResponse(createdTask)
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// CompleteTask marks the user's task as completed.
//
// Scope:
//  1. input validation and business-error mapping;
//  2. delegates the status update to the DAO;
//  3. does NOT handle idempotency keys (middleware's job);
//  4. does NOT delete caches (the GORM cache_deleter callback does).
func (ts *TaskService) CompleteTask(ctx context.Context, req *model.UserCompleteTaskRequest, userID int) (*model.UserCompleteTaskResponse, error) {
	// 1. Guard rails: nil body, invalid user, or invalid task_id map to a
	//    business error.
	if req == nil || userID <= 0 || req.TaskID <= 0 {
		return nil, respond.WrongTaskID
	}

	// 2. DAO performs "query + update if needed".
	updatedTask, alreadyCompleted, err := ts.dao.CompleteTaskByID(ctx, userID, req.TaskID)
	if err != nil {
		// 2.1 Missing row or wrong owner both map to WrongTaskID.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, respond.WrongTaskID
		}
		// 2.2 Other DB failures propagate; the central handler returns 500.
		return nil, err
	}
	if updatedTask == nil {
		// 3. Defensive: the DAO must not return nil on success; treat as an
		//    internal error if it does.
		return nil, errors.New("complete task succeeded but task is nil")
	}

	// 4. Build the response:
	// 4.1 already_completed=true marks an idempotent hit (still a success);
	// 4.2 is_completed is always true so the frontend can refresh directly.
	resp := &model.UserCompleteTaskResponse{
		TaskID:           updatedTask.ID,
		IsCompleted:      true,
		AlreadyCompleted: alreadyCompleted,
		Status:           "completed",
	}
	// Best-effort cancellation of the active-schedule due job.
	ts.cancelActiveScheduleJobBestEffort(ctx, updatedTask.UserID, updatedTask.ID, "task_completed")
	return resp, nil
}
|
||||
|
||||
// UndoCompleteTask clears the "completed" mark on a user's task.
//
// Scope:
//  1. input validation and business-error mapping;
//  2. delegates the state restore to the DAO;
//  3. intentionally NOT idempotent (per requirements, undoing an
//     uncompleted task must error);
//  4. does NOT delete caches (the GORM cache_deleter callback does).
func (ts *TaskService) UndoCompleteTask(ctx context.Context, req *model.UserUndoCompleteTaskRequest, userID int) (*model.UserUndoCompleteTaskResponse, error) {
	// 1. Guard rails: nil body, invalid user, or invalid task_id map to a
	//    business error.
	if req == nil || userID <= 0 || req.TaskID <= 0 {
		return nil, respond.WrongTaskID
	}

	// 2. DAO performs the "restore to uncompleted" update.
	updatedTask, err := ts.dao.UndoCompleteTaskByID(ctx, userID, req.TaskID)
	if err != nil {
		// 2.1 Missing row or wrong owner both map to WrongTaskID.
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, respond.WrongTaskID
		}
		// 2.2 Task was never completed: surface the dedicated business error.
		if errors.Is(err, respond.TaskNotCompleted) {
			return nil, respond.TaskNotCompleted
		}
		// 2.3 Other DB failures propagate.
		return nil, err
	}
	if updatedTask == nil {
		// 3. Defensive: DAO success with a nil task is an internal error.
		return nil, errors.New("undo complete task succeeded but task is nil")
	}

	// 4. Build the response: after a successful undo is_completed is false.
	resp := &model.UserUndoCompleteTaskResponse{
		TaskID:      updatedTask.ID,
		IsCompleted: false,
		Status:      "uncompleted",
	}
	return resp, nil
}
|
||||
|
||||
// GetUserTasks 获取用户任务列表(含"读时紧急性派生"与"异步平移触发")。
|
||||
//
|
||||
// 核心流程(步骤化):
|
||||
// 1. 先读缓存,未命中再回源 DB,并把"原始模型"回填缓存;
|
||||
// 2. 在内存里做"读时派生":仅用于本次返回给前端,不直接改库;
|
||||
// 3. 收集"已到紧急分界线且仍处于非紧急象限"的任务 ID;
|
||||
// 4. 通过 Redis SETNX 去重后,发布 outbox 事件异步落库;
|
||||
// 5. 无论发布成功与否,都优先返回本次派生结果,保证用户读体验。
|
||||
//
|
||||
// 一致性策略:
|
||||
// 1. 缓存里存的是原始任务,不是派生后的优先级;
|
||||
// 2. 真实平移由异步消费者条件更新 DB;
|
||||
// 3. DB 更新后由 cache_deleter 自动删缓存,下一次读取自然拿到新状态。
|
||||
func (ts *TaskService) GetUserTasks(ctx context.Context, userID int) ([]model.GetUserTaskResp, error) {
|
||||
derivedTasks, err := ts.GetTasksWithUrgencyPromotion(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return conv.ModelToGetUserTasksResp(derivedTasks), nil
|
||||
}
|
||||
|
||||
// BatchTaskStatus bulk-queries completion status for the current user's
// tasks.
//
// Scope:
//  1. filters, deduplicates, and caps the requested IDs;
//  2. returns only tasks the current user can access and that still exist,
//     so other users' task state never leaks;
//  3. reuses getRawUserTasks and its Redis task-list cache — no new
//     cache-bypassing DAO query;
//  4. read-only: triggers no cache_deleter and never rewrites NewAgent
//     timeline snapshots.
func (ts *TaskService) BatchTaskStatus(ctx context.Context, req *model.BatchTaskStatusRequest, userID int) (*model.BatchTaskStatusResponse, error) {
	resp := &model.BatchTaskStatusResponse{
		Items: []model.BatchTaskStatusItem{},
	}
	if userID <= 0 {
		return nil, respond.WrongUserID
	}
	if req == nil {
		return resp, nil
	}

	// 1. Normalize the incoming history-card task IDs:
	// 1.1 drop invalid IDs to avoid pointless matching;
	// 1.2 keep first-seen order so the frontend can backfill in request order;
	// 1.3 truncate at the cap so one hydration request can't amplify cost.
	validIDs := compactPositiveUniqueTaskIDsWithLimit(req.IDs, taskBatchStatusMaxIDs)
	if len(validIDs) == 0 {
		return resp, nil
	}

	// 2. Reuse the raw task-read path.
	// 2.1 cache hit reads smartflow:tasks:{userID} from Redis;
	// 2.2 on miss getRawUserTasks falls back to the DB and backfills;
	// 2.3 "user has no tasks" maps to empty items — hydration's
	//     "no match is not an error" semantics.
	tasks, err := ts.getRawUserTasks(ctx, userID)
	if err != nil {
		if errors.Is(err, respond.UserTasksEmpty) {
			return resp, nil
		}
		return nil, err
	}

	// 3. Match in memory within this user's task set.
	// 3.1 IDs that miss may be deleted, someone else's, or stale snapshot
	//     entries — all silently filtered;
	// 3.2 only fields the current model actually has are returned (no
	//     fabricated updated_at).
	taskByID := make(map[int]model.Task, len(tasks))
	for _, task := range tasks {
		taskByID[task.ID] = task
	}
	for _, id := range validIDs {
		task, exists := taskByID[id]
		if !exists {
			continue
		}
		resp.Items = append(resp.Items, model.BatchTaskStatusItem{
			ID:          task.ID,
			IsCompleted: task.IsCompleted,
		})
	}
	return resp, nil
}
|
||||
|
||||
// GetTasksWithUrgencyPromotion 读取用户任务并应用读时紧急性提升 + 异步落库触发。
|
||||
//
|
||||
// 统一入口,供前端查询(GetUserTasks)和 LLM 工具查询(QueryTasksForTool)复用。
|
||||
// 调用方不应假设 DB 已更新——持久化是异步的。
|
||||
func (ts *TaskService) GetTasksWithUrgencyPromotion(ctx context.Context, userID int) ([]model.Task, error) {
|
||||
rawTasks, err := ts.getRawUserTasks(ctx, userID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
derivedTasks, duePromoteTaskIDs := deriveTaskUrgencyForRead(rawTasks, time.Now())
|
||||
ts.tryEnqueueTaskUrgencyPromote(ctx, userID, duePromoteTaskIDs)
|
||||
return derivedTasks, nil
|
||||
}
|
||||
|
||||
// getRawUserTasks reads the "raw" (non-derived) task models.
//
// Scope:
//  1. cache hit / DB fallback / cache backfill;
//  2. no priority derivation, no async event publishing;
//  3. cache-write failures are only logged, never block the main path.
func (ts *TaskService) getRawUserTasks(ctx context.Context, userID int) ([]model.Task, error) {
	// 1. Cache first: return immediately on a hit.
	cachedTasks, err := ts.cache.GetUserTasksFromCache(ctx, userID)
	if err == nil {
		return cachedTasks, nil
	}

	// 2. Any error other than redis.Nil is a real failure — surface it
	//    instead of masking it.
	if !errors.Is(err, redis.Nil) {
		return nil, err
	}

	// 3. Cache miss: fall back to the DB.
	dbTasks, err := ts.dao.GetTasksByUserID(userID)
	if err != nil {
		return nil, err
	}

	// 4. Backfill the cache (failure does not block the main path).
	if setErr := ts.cache.SetUserTasksToCache(ctx, userID, dbTasks); setErr != nil {
		log.Printf("写入用户任务缓存失败: user_id=%d err=%v", userID, setErr)
	}
	return dbTasks, nil
}
|
||||
|
||||
// deriveTaskUrgencyForRead 对任务做"读时紧急性派生",并收集需要异步落库的任务 ID。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只在内存里改本次返回值,不写 DB;
|
||||
// 2. 只做"到线且未完成任务"的优先级映射;
|
||||
// 3. 不处理去重锁和事件发布。
|
||||
//
|
||||
// 返回语义:
|
||||
// 1. 第一个返回值:可直接用于响应前端的派生任务切片;
|
||||
// 2. 第二个返回值:需要发"异步平移事件"的任务 ID 列表(可能为空)。
|
||||
func deriveTaskUrgencyForRead(tasks []model.Task, now time.Time) ([]model.Task, []int) {
|
||||
// 1. 拷贝切片,避免修改调用方持有的原始数据。
|
||||
derived := make([]model.Task, len(tasks))
|
||||
copy(derived, tasks)
|
||||
|
||||
pendingPromoteTaskIDs := make([]int, 0, len(derived))
|
||||
|
||||
// 2. 逐条判断是否满足"自动平移"条件。
|
||||
for idx := range derived {
|
||||
current := &derived[idx]
|
||||
|
||||
// 2.1 已完成任务不参与平移。
|
||||
if current.IsCompleted {
|
||||
continue
|
||||
}
|
||||
// 2.2 没有分界线的任务不参与平移。
|
||||
if current.UrgencyThresholdAt == nil {
|
||||
continue
|
||||
}
|
||||
// 2.3 尚未到分界线,不平移。
|
||||
if current.UrgencyThresholdAt.After(now) {
|
||||
continue
|
||||
}
|
||||
|
||||
// 2.4 到线后,仅把"不紧急象限"平移到对应"紧急象限"。
|
||||
// 2.4.1 重要不紧急(2) -> 重要且紧急(1)
|
||||
// 2.4.2 不简单不重要(4) -> 简单不重要(3)
|
||||
switch current.Priority {
|
||||
case 2:
|
||||
current.Priority = 1
|
||||
pendingPromoteTaskIDs = append(pendingPromoteTaskIDs, current.ID)
|
||||
case 4:
|
||||
current.Priority = 3
|
||||
pendingPromoteTaskIDs = append(pendingPromoteTaskIDs, current.ID)
|
||||
default:
|
||||
// 2.4.3 其他优先级不处理(包含已经是 1/3 的情况)。
|
||||
}
|
||||
}
|
||||
return derived, pendingPromoteTaskIDs
|
||||
}
|
||||
|
||||
// tryEnqueueTaskUrgencyPromote attempts to publish a "task urgency promote
// requested" event.
//
// Scope:
//  1. owns the Redis dedupe lock + outbox publish;
//  2. does NOT persist the promotion (the consumer does);
//  3. on publish failure, releases the locks it grabbed so tasks are not
//     stuck looking "already delivered".
func (ts *TaskService) tryEnqueueTaskUrgencyPromote(ctx context.Context, userID int, taskIDs []int) {
	// 1. Fast exits: no publisher or no candidates.
	if ts.eventPublisher == nil || userID <= 0 || len(taskIDs) == 0 {
		return
	}

	// 2. Sanitize IDs first so invalid ones never touch dedupe or publish.
	validTaskIDs := compactPositiveUniqueTaskIDs(taskIDs)
	if len(validTaskIDs) == 0 {
		return
	}

	// 3. Grab a SETNX dedupe lock per task:
	// 3.1 only lock winners get published this round;
	// 3.2 a lost lock means a request is already in flight — skip;
	// 3.3 lock errors are logged but never interrupt the main flow.
	lockedTaskIDs := make([]int, 0, len(validTaskIDs))
	lockedKeys := make([]string, 0, len(validTaskIDs))
	for _, taskID := range validTaskIDs {
		lockKey := fmt.Sprintf(taskUrgencyPromoteDedupeKeyFmt, userID, taskID)
		locked, lockErr := ts.cache.AcquireLock(ctx, lockKey, taskUrgencyPromoteDedupeTTL)
		if lockErr != nil {
			log.Printf("任务平移去重锁获取失败: user_id=%d task_id=%d err=%v", userID, taskID, lockErr)
			continue
		}
		if !locked {
			continue
		}
		lockedTaskIDs = append(lockedTaskIDs, taskID)
		lockedKeys = append(lockedKeys, lockKey)
	}
	if len(lockedTaskIDs) == 0 {
		return
	}

	// 4. Publish the outbox event: this only guarantees "accepted into the
	//    outbox or an error" — it does not wait for the consumer.
	publishErr := eventsvc.PublishTaskUrgencyPromoteRequested(ctx, ts.eventPublisher, model.TaskUrgencyPromoteRequestedPayload{
		UserID:      userID,
		TaskIDs:     lockedTaskIDs,
		TriggeredAt: time.Now(),
	})
	if publishErr != nil {
		// 4.1 Roll back: release this round's dedupe locks so later reads
		//     can re-trigger delivery instead of being falsely blocked.
		ts.releaseTaskPromoteLocks(lockedKeys)
		log.Printf("任务平移事件发布失败: user_id=%d task_ids=%v err=%v", userID, lockedTaskIDs, publishErr)
		return
	}

	log.Printf("任务平移事件已发布: user_id=%d task_ids=%v", userID, lockedTaskIDs)
}
|
||||
|
||||
// releaseTaskPromoteLocks 释放任务平移去重锁。
|
||||
//
|
||||
// 说明:
|
||||
// 1. 仅用于"发布失败回滚"场景;
|
||||
// 2. 使用 Background 避免请求上下文已取消时导致锁释放失败。
|
||||
func (ts *TaskService) releaseTaskPromoteLocks(lockKeys []string) {
|
||||
if len(lockKeys) == 0 {
|
||||
return
|
||||
}
|
||||
releaseCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
|
||||
defer cancel()
|
||||
|
||||
for _, key := range lockKeys {
|
||||
if err := ts.cache.ReleaseLock(releaseCtx, key); err != nil {
|
||||
log.Printf("任务平移去重锁释放失败: key=%s err=%v", key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// compactPositiveUniqueTaskIDs filters out non-positive task IDs and
// removes duplicates, preserving first-seen order.
//
// Scope:
//  1. pure argument sanitation;
//  2. carries no business rules. Equivalent to the limited variant with no
//     cap applied.
func compactPositiveUniqueTaskIDs(taskIDs []int) []int {
	seen := make(map[int]struct{}, len(taskIDs))
	out := make([]int, 0, len(taskIDs))
	for _, id := range taskIDs {
		if id <= 0 {
			continue
		}
		if _, dup := seen[id]; dup {
			continue
		}
		seen[id] = struct{}{}
		out = append(out, id)
	}
	return out
}
|
||||
|
||||
// compactPositiveUniqueTaskIDsWithLimit filters out non-positive task IDs,
// removes duplicates (keeping first-seen order), and optionally caps the
// result length.
//
// Scope:
//  1. pure input normalization — no task lookup, no permission checks;
//  2. limit <= 0 means "no cap" so existing callers keep their behavior;
//  3. scanning stops as soon as the cap is reached, so oversized requests
//     do not keep burning CPU.
func compactPositiveUniqueTaskIDsWithLimit(taskIDs []int, limit int) []int {
	normalized := make([]int, 0, len(taskIDs))
	seen := make(map[int]struct{}, len(taskIDs))
	for _, candidate := range taskIDs {
		if candidate <= 0 {
			continue
		}
		if _, already := seen[candidate]; already {
			continue
		}
		seen[candidate] = struct{}{}
		normalized = append(normalized, candidate)
		if limit > 0 && len(normalized) >= limit {
			break
		}
	}
	return normalized
}
|
||||
|
||||
// UpdateTask 更新用户指定任务的属性(部分更新)。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责参数校验:task_id 合法性、priority_group 范围;
|
||||
// 2. 负责将请求 DTO 转换为 DAO 层的 updates map;
|
||||
// 3. 空请求体(无字段需要更新)返回明确业务错误;
|
||||
// 4. 不负责缓存删除(由 GORM cache_deleter 回调自动处理)。
|
||||
func (ts *TaskService) UpdateTask(ctx context.Context, req *model.UserUpdateTaskRequest, userID int) (model.GetUserTaskResp, error) {
|
||||
// 1. 参数兜底。
|
||||
if req == nil || userID <= 0 || req.TaskID <= 0 {
|
||||
return model.GetUserTaskResp{}, respond.WrongTaskID
|
||||
}
|
||||
|
||||
// 2. 构造 updates map:只有非 nil 的字段才写入。
|
||||
updates := make(map[string]interface{})
|
||||
if req.Title != nil {
|
||||
updates["title"] = *req.Title
|
||||
}
|
||||
if req.PriorityGroup != nil {
|
||||
// 2.1 优先级范围校验:当前任务体系只允许 1~4。
|
||||
if *req.PriorityGroup < 1 || *req.PriorityGroup > 4 {
|
||||
return model.GetUserTaskResp{}, respond.InvalidPriority
|
||||
}
|
||||
// 2.2 JSON 字段名是 priority_group,数据库列名是 priority。
|
||||
updates["priority"] = *req.PriorityGroup
|
||||
}
|
||||
if req.DeadlineAt != nil {
|
||||
updates["deadline_at"] = *req.DeadlineAt
|
||||
}
|
||||
if req.UrgencyThresholdAt != nil {
|
||||
updates["urgency_threshold_at"] = *req.UrgencyThresholdAt
|
||||
}
|
||||
|
||||
// 3. 空更新检测:至少需要一个可更新字段。
|
||||
if len(updates) == 0 {
|
||||
return model.GetUserTaskResp{}, respond.TaskUpdateNoFields
|
||||
}
|
||||
|
||||
// 4. 调用 DAO 执行更新。
|
||||
updatedTask, err := ts.dao.UpdateTaskByID(ctx, userID, req.TaskID, updates)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return model.GetUserTaskResp{}, respond.WrongTaskID
|
||||
}
|
||||
return model.GetUserTaskResp{}, err
|
||||
}
|
||||
ts.syncActiveScheduleJobBestEffort(ctx, updatedTask)
|
||||
|
||||
// 5. 转换为响应 DTO。
|
||||
return conv.ModelToGetUserTaskResp(updatedTask), nil
|
||||
}
|
||||
|
||||
// DeleteTask 永久删除用户指定任务。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 负责入参校验与业务错误映射;
|
||||
// 2. 负责调用 DAO 执行硬删除;
|
||||
// 3. 任务不存在时返回幂等信息码(TaskAlreadyDeleted);
|
||||
// 4. 不负责缓存删除(由 GORM cache_deleter 回调自动处理)。
|
||||
func (ts *TaskService) DeleteTask(ctx context.Context, req *model.UserCompleteTaskRequest, userID int) (int, error) {
|
||||
// 1. 参数兜底。
|
||||
if req == nil || userID <= 0 || req.TaskID <= 0 {
|
||||
return 0, respond.WrongTaskID
|
||||
}
|
||||
|
||||
// 2. 调用 DAO 执行删除。
|
||||
deletedTask, err := ts.dao.DeleteTaskByID(ctx, userID, req.TaskID)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// 2.1 任务不存在或不属于当前用户:按幂等语义返回信息码。
|
||||
return 0, respond.TaskAlreadyDeleted
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
ts.cancelActiveScheduleJobBestEffort(ctx, deletedTask.UserID, deletedTask.ID, "task_deleted")
|
||||
|
||||
return deletedTask.ID, nil
|
||||
}
|
||||
@@ -1,91 +0,0 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// syncActiveScheduleJobBestEffort 在任务变更后同步主动调度 due job。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只维护 important_urgent_task 的 job,不直接触发主动调度主链路;
|
||||
// 2. 任务未完成且存在 urgency_threshold_at 时 upsert pending job;
|
||||
// 3. 任务已完成或阈值为空时取消当前 pending job;
|
||||
// 4. 当前任务接口尚未整体事务化,job 同步失败只记日志,避免任务主写入出现“已落库但接口失败”的更差体验。
|
||||
func (ts *TaskService) syncActiveScheduleJobBestEffort(ctx context.Context, task *model.Task) {
|
||||
if ts == nil || ts.activeScheduleDAO == nil || task == nil {
|
||||
return
|
||||
}
|
||||
if task.IsCompleted || task.UrgencyThresholdAt == nil {
|
||||
ts.cancelActiveScheduleJobBestEffort(ctx, task.UserID, task.ID, "task_not_schedulable")
|
||||
return
|
||||
}
|
||||
|
||||
job := &model.ActiveScheduleJob{
|
||||
ID: activeScheduleJobID(task.UserID, task.ID),
|
||||
UserID: task.UserID,
|
||||
TaskID: task.ID,
|
||||
TriggerType: model.ActiveScheduleTriggerTypeImportantUrgentTask,
|
||||
Status: model.ActiveScheduleJobStatusPending,
|
||||
TriggerAt: *task.UrgencyThresholdAt,
|
||||
DedupeKey: activeScheduleTriggerDedupeKey(task.UserID, task.ID, *task.UrgencyThresholdAt),
|
||||
TraceID: activeScheduleTraceID(task.UserID, task.ID),
|
||||
}
|
||||
if err := ts.activeScheduleDAO.CreateOrUpdateJob(ctx, job); err != nil {
|
||||
log.Printf("主动调度 job upsert 失败: user_id=%d task_id=%d err=%v", task.UserID, task.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// cancelActiveScheduleJobBestEffort 取消任务当前待触发 job。
|
||||
//
|
||||
// 职责边界:
|
||||
// 1. 只取消 pending job,历史 triggered/skipped/failed 记录保留审计;
|
||||
// 2. 找不到 pending job 属于正常幂等场景;
|
||||
// 3. reason 只进入 last_error_code,方便后续排障知道取消来源。
|
||||
func (ts *TaskService) cancelActiveScheduleJobBestEffort(ctx context.Context, userID int, taskID int, reason string) {
|
||||
if ts == nil || ts.activeScheduleDAO == nil || userID <= 0 || taskID <= 0 {
|
||||
return
|
||||
}
|
||||
job, err := ts.activeScheduleDAO.FindPendingJobByTask(ctx, userID, taskID)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
return
|
||||
}
|
||||
log.Printf("主动调度 pending job 查询失败: user_id=%d task_id=%d err=%v", userID, taskID, err)
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
updates := map[string]any{
|
||||
"status": model.ActiveScheduleJobStatusCanceled,
|
||||
"last_error_code": reason,
|
||||
"last_scanned_at": &now,
|
||||
}
|
||||
if err = ts.activeScheduleDAO.UpdateJobFields(ctx, job.ID, updates); err != nil {
|
||||
log.Printf("主动调度 pending job 取消失败: user_id=%d task_id=%d job_id=%s err=%v", userID, taskID, job.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// activeScheduleJobID derives the deterministic job ID for a user/task pair.
func activeScheduleJobID(userID int, taskID int) string {
	const jobIDFormat = "asj_task_%d_%d"
	return fmt.Sprintf(jobIDFormat, userID, taskID)
}
|
||||
|
||||
// activeScheduleTraceID derives the deterministic trace ID for a user/task pair.
func activeScheduleTraceID(userID int, taskID int) string {
	const traceIDFormat = "trace_active_task_%d_%d"
	return fmt.Sprintf(traceIDFormat, userID, taskID)
}
|
||||
|
||||
func activeScheduleTriggerDedupeKey(userID int, taskID int, triggerAt time.Time) string {
|
||||
windowStart := triggerAt.Truncate(30 * time.Minute)
|
||||
return fmt.Sprintf("%d:%s:%s:%d:%s",
|
||||
userID,
|
||||
model.ActiveScheduleTriggerTypeImportantUrgentTask,
|
||||
model.ActiveScheduleTargetTypeTaskPool,
|
||||
taskID,
|
||||
windowStart.Format(time.RFC3339),
|
||||
)
|
||||
}
|
||||
@@ -6,10 +6,10 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/ports"
|
||||
"github.com/LoveLosita/smartflow/backend/services/active_scheduler/core/trigger"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
)
|
||||
|
||||
// IsPreviewExpired 判断 preview 是否已经超过确认有效期。
|
||||
|
||||
@@ -8,8 +8,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/LoveLosita/smartflow/backend/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/model"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/conv"
|
||||
"github.com/LoveLosita/smartflow/backend/services/runtime/model"
|
||||
"gorm.io/gorm"
|
||||
"gorm.io/gorm/clause"
|
||||
)
|
||||
|
||||
@@ -2,6 +2,8 @@ package feedbacklocate
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
@@ -102,8 +104,9 @@ func (s *Service) Resolve(ctx context.Context, req Request) (Result, error) {
|
||||
}
|
||||
|
||||
messages := llmservice.BuildSystemUserMessages(strings.TrimSpace(locateSystemPrompt), nil, userPrompt)
|
||||
invokeCtx := llmservice.WithBillingContext(ctx, buildFeedbackLocateBillingContext(req))
|
||||
resp, rawResult, err := llmservice.GenerateJSON[llmResponse](
|
||||
ctx,
|
||||
invokeCtx,
|
||||
s.client,
|
||||
messages,
|
||||
llmservice.GenerateOptions{
|
||||
@@ -365,3 +368,21 @@ func minInt(left, right int) int {
|
||||
}
|
||||
return right
|
||||
}
|
||||
|
||||
func buildFeedbackLocateBillingContext(req Request) llmservice.BillingContext {
|
||||
if req.UserID <= 0 {
|
||||
return llmservice.BillingContext{
|
||||
Scene: "active_scheduler_feedback_locate",
|
||||
ModelAlias: "active_scheduler_feedback_locate",
|
||||
}
|
||||
}
|
||||
sum := sha1.Sum([]byte(strings.TrimSpace(req.UserMessage) + "|" + strings.TrimSpace(req.PendingQuestion)))
|
||||
requestID := fmt.Sprintf("active_scheduler_feedback_locate:%d:%s", req.UserID, hex.EncodeToString(sum[:]))
|
||||
return llmservice.BillingContext{
|
||||
UserID: uint64(req.UserID),
|
||||
EventID: requestID,
|
||||
Scene: "active_scheduler_feedback_locate",
|
||||
RequestID: requestID,
|
||||
ModelAlias: "active_scheduler_feedback_locate",
|
||||
}
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user