Files
smartmate/docker-compose.yml
Losita 61db646805 Version: 0.9.80.dev.260506
后端:
1. LLM 独立服务与统一计费出口落地:新增 `cmd/llm`、`client/llm` 与 `services/llm/rpc`,补齐 BillingContext、CreditBalanceGuard、价格规则解析、stream usage 归集与 `credit.charge.requested` outbox 发布,active-scheduler / agent / course / memory / gateway fallback 全部改走 llm zrpc,不再各自本地初始化模型。
2. TokenStore 收口为 Credit 权威账本:新增 credit account / ledger / product / order / price-rule / reward-rule 能力与 Redis 快照缓存,扩展 tokenstore rpc/client 支撑余额快照、消耗看板、商品、订单、流水、价格规则和奖励规则,并接入 LLM charge 事件消费完成 Credit 扣费落账。
3. 计费旧链路下线与网关切口切换:`/token-store` 语义整体切到 `/credit-store`,agent chat 移除旧 TokenQuotaGuard,userauth 的 CheckTokenQuota / AdjustTokenUsage 改为废弃,聊天历史落库不再同步旧 token 额度账本,course 图片解析请求补 user_id 进入新计费口径。

前端:
4. 计划广场从 mock 数据切到真实接口:新增 forum api/types,首页支持真实列表、标签、搜索、防抖、点赞、导入和发布计划,详情页补齐帖子详情、评论树、回复和删除评论链路,同时补上“至少一个标签”的前后端约束与默认标签兜底。
5. 商店页切到 Credit 体系并重做展示:顶部改为余额 + Credit/Token 消耗看板,支持 24h/7d/30d/all 周期切换;套餐区展示原价与当前价;历史区改为当前用户 Credit 流水并支持查看更多,整体视觉和交互同步收口。

仓库:
6. 配置与本地启动体系补齐 llm / outbox 编排:`config.example.yaml` 增加 llm rpc 和统一 outbox service 配置,`dev-common.ps1` 把 llm 纳入多服务依赖并自动建 Kafka topic,`docker-compose.yml` 同步初始化 agent/task/memory/active-scheduler/notification/taskclass-forum/llm/token-store 全量 outbox topic。
2026-05-06 20:16:53 +08:00

184 lines
5.0 KiB
YAML

# Local development infrastructure for SmartFlow: MySQL, Redis, Kafka (KRaft),
# etcd, MinIO, Milvus standalone and the Attu admin UI, plus a one-shot
# kafka-init job that pre-creates every per-service outbox topic.
services:
  mysql:
    image: mysql:8.0
    container_name: smartflow-mysql
    restart: unless-stopped
    environment:
      MYSQL_ROOT_PASSWORD: root_password_123
      MYSQL_DATABASE: smartflow
      MYSQL_USER: smartflow_user
      MYSQL_PASSWORD: smartflow_password_456
    ports:
      - "3306:3306"
    command: --default-authentication-plugin=mysql_native_password
    volumes:
      - mysql_data:/var/lib/mysql
    healthcheck:
      test: ["CMD-SHELL", "mysqladmin ping -h localhost -uroot -proot_password_123"]
      interval: 10s
      timeout: 5s
      retries: 10

  redis:
    image: redis:7
    container_name: smartflow-redis
    restart: unless-stopped
    # AOF persistence so data survives container restarts.
    command: redis-server --appendonly yes --requirepass redis_password_789
    ports:
      - "6379:6379"
    volumes:
      - redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "-a", "redis_password_789", "ping"]
      interval: 10s
      timeout: 5s
      retries: 10

  # Single-node Kafka in KRaft mode (no ZooKeeper). Three listeners:
  # INTERNAL (9094) for in-network services, EXTERNAL (9092) advertised as
  # localhost for host-side clients, CONTROLLER (9093) for quorum traffic.
  kafka:
    image: apache/kafka:3.7.2
    container_name: smartflow-kafka
    restart: unless-stopped
    ports:
      - "9092:9092"
    environment:
      # Env values quoted so YAML never retypes them; containers see strings.
      KAFKA_NODE_ID: "1"
      KAFKA_PROCESS_ROLES: broker,controller
      CLUSTER_ID: MkU3OEVBNTcwNTJENDM2Qk
      KAFKA_LISTENERS: INTERNAL://:9094,EXTERNAL://:9092,CONTROLLER://:9093
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9094,EXTERNAL://localhost:9092
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER
      KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka:9093
      KAFKA_AUTO_CREATE_TOPICS_ENABLE: "true"
      KAFKA_NUM_PARTITIONS: "3"
      # Single broker, so every replication factor / min-ISR must be 1.
      KAFKA_DEFAULT_REPLICATION_FACTOR: "1"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1"
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: "1"
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: "1"
      KAFKA_LOG_DIRS: /var/lib/kafka/data
    volumes:
      - kafka_data:/var/lib/kafka/data
    healthcheck:
      test: ["CMD-SHELL", "/opt/kafka/bin/kafka-topics.sh --bootstrap-server localhost:9092 --list >/dev/null 2>&1"]
      interval: 10s
      timeout: 5s
      retries: 15

  # One-shot job: waits for the broker to be healthy, then idempotently
  # creates every service outbox topic. "$$" is Compose escaping for a
  # literal "$" so the shell (not Compose) expands the loop variable.
  kafka-init:
    image: apache/kafka:3.7.2
    container_name: smartflow-kafka-init
    depends_on:
      kafka:
        condition: service_healthy
    entrypoint:
      - /bin/bash
      - -lc
      - |
        set -e
        for topic in \
          smartflow.agent.outbox \
          smartflow.task.outbox \
          smartflow.memory.outbox \
          smartflow.active-scheduler.outbox \
          smartflow.notification.outbox \
          smartflow.taskclass-forum.outbox \
          smartflow.llm.outbox \
          smartflow.token-store.outbox
        do
          /opt/kafka/bin/kafka-topics.sh \
            --bootstrap-server kafka:9094 \
            --create \
            --if-not-exists \
            --topic "$$topic" \
            --partitions 3 \
            --replication-factor 1
        done
    # Quoted: a bare `no` would parse as YAML boolean false.
    restart: "no"

  # etcd backs both service discovery and Milvus metadata.
  etcd:
    image: quay.io/coreos/etcd:v3.5.5
    container_name: smartflow-etcd
    restart: unless-stopped
    environment:
      ETCD_AUTO_COMPACTION_MODE: revision
      ETCD_AUTO_COMPACTION_RETENTION: "1000"
      ETCD_QUOTA_BACKEND_BYTES: "4294967296"
      ETCD_SNAPSHOT_COUNT: "50000"
    command: >
      etcd
      --advertise-client-urls=http://etcd:2379
      --listen-client-urls=http://0.0.0.0:2379
      --data-dir=/etcd
    volumes:
      - etcd_data:/etcd
    healthcheck:
      test: ["CMD", "etcdctl", "endpoint", "health"]
      interval: 10s
      timeout: 5s
      retries: 20

  # Object storage for Milvus segments; console on 9001.
  minio:
    image: minio/minio:RELEASE.2023-03-20T20-16-18Z
    container_name: smartflow-minio
    restart: unless-stopped
    environment:
      MINIO_ROOT_USER: minioadmin
      MINIO_ROOT_PASSWORD: minioadmin
    command: minio server /minio_data --console-address ":9001"
    ports:
      - "9000:9000"
      - "9001:9001"
    volumes:
      - minio_data:/minio_data
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
      interval: 10s
      timeout: 5s
      retries: 20

  # Milvus standalone, using the external etcd and MinIO services above.
  milvus-standalone:
    image: milvusdb/milvus:v2.4.4
    container_name: smartflow-milvus
    restart: unless-stopped
    command: ["milvus", "run", "standalone"]
    environment:
      ETCD_USE_EMBED: "false"
      ETCD_ENDPOINTS: etcd:2379
      MINIO_ADDRESS: minio:9000
    ports:
      - "19530:19530"
      - "9091:9091"
    volumes:
      - milvus_data:/var/lib/milvus
    depends_on:
      etcd:
        condition: service_healthy
      minio:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9091/healthz"]
      interval: 10s
      timeout: 5s
      retries: 30

  # Attu web UI for Milvus administration, on host port 8000.
  attu:
    image: zilliz/attu:v2.4.3
    container_name: smartflow-attu
    restart: unless-stopped
    ports:
      - "8000:3000"
    environment:
      MILVUS_URL: smartflow-milvus:19530
    depends_on:
      milvus-standalone:
        condition: service_healthy

volumes:
  mysql_data:
  redis_data:
  kafka_data:
  etcd_data:
  minio_data:
  milvus_data: